diff --git a/sdk/openai/ci.yml b/sdk/openai/ci.yml index 9aceace14deb..e2c9d5939b94 100644 --- a/sdk/openai/ci.yml +++ b/sdk/openai/ci.yml @@ -16,6 +16,8 @@ pr: - feature/* - release/* - hotfix/* + exclude: + - feature/v4 paths: include: - sdk/openai/ diff --git a/sdk/openai/openai/api-extractor.json b/sdk/openai/openai/api-extractor.json index 789a58770674..503203dcc456 100644 --- a/sdk/openai/openai/api-extractor.json +++ b/sdk/openai/openai/api-extractor.json @@ -1,18 +1,31 @@ { "$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json", "mainEntryPointFilePath": "./types/src/index.d.ts", - "docModel": { "enabled": true }, - "apiReport": { "enabled": true, "reportFolder": "./review" }, + "docModel": { + "enabled": true + }, + "apiReport": { + "enabled": true, + "reportFolder": "./review" + }, "dtsRollup": { "enabled": true, "untrimmedFilePath": "", "publicTrimmedFilePath": "./types/openai.d.ts" }, "messages": { - "tsdocMessageReporting": { "default": { "logLevel": "none" } }, + "tsdocMessageReporting": { + "default": { + "logLevel": "none" + } + }, "extractorMessageReporting": { - "ae-missing-release-tag": { "logLevel": "none" }, - "ae-unresolved-link": { "logLevel": "none" } + "ae-missing-release-tag": { + "logLevel": "none" + }, + "ae-unresolved-link": { + "logLevel": "none" + } } } -} +} \ No newline at end of file diff --git a/sdk/openai/openai/package.json b/sdk/openai/openai/package.json index 34a3ac2f3492..ac81cad86f6a 100644 --- a/sdk/openai/openai/package.json +++ b/sdk/openai/openai/package.json @@ -151,4 +151,4 @@ "Azure Cognitive Services instance": "https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource" } } -} +} \ No newline at end of file diff --git a/sdk/openai/openai/src/OpenAIClient.ts b/sdk/openai/openai/src/OpenAIClient.ts deleted file mode 100644 index c75962ba7af5..000000000000 --- a/sdk/openai/openai/src/OpenAIClient.ts +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { TokenCredential, KeyCredential, isTokenCredential } from "@azure/core-auth"; -import { - Completions, - ChatCompletions, - ImageGenerations, - Embeddings, - ChatRequestMessageUnion, - EventStream, -} from "./models/models.js"; -import { - GetCompletionsOptions, - GetChatCompletionsOptions, - GetEmbeddingsOptions, - GetImagesOptions, -} from "./models/options.js"; -import { createOpenAI, OpenAIClientOptions, OpenAIContext } from "./api/index.js"; -import { - getCompletions, - getChatCompletions, - getImageGenerations, - getEmbeddings, - getAudioTranscription, - getAudioTranslation, -} from "./api/operations.js"; -import { nonAzurePolicy } from "./api/policies/nonAzure.js"; -import { streamChatCompletions, streamCompletions } from "./api/operations.js"; -import { - GetAudioTranscriptionOptions, - AudioResultSimpleJson, - AudioResultFormat, - AudioResult, - GetAudioTranslationOptions, -} from "./models/audio.js"; - -function createOpenAIEndpoint(version: number): string { - return `https://api.openai.com/v${version}`; -} - -function isCred(cred: Record): cred is TokenCredential | KeyCredential { - return isTokenCredential(cred) || cred.key !== undefined; -} - -export { OpenAIClientOptions } from "./api/OpenAIContext.js"; -/** - * A client for interacting with Azure OpenAI. - * - * The client needs the endpoint of an OpenAI resource and an authentication - * method such as an API key or token. The API key and endpoint can be found in - * the OpenAI resource page. 
They will be located in the resource's Keys and Endpoint page. - * - * ### Examples for authentication: - * - * #### API Key - * - * ```js - * import { OpenAIClient } from "@azure/openai"; - * import { AzureKeyCredential } from "@azure/core-auth"; - * - * const endpoint = ""; - * const credential = new AzureKeyCredential(""); - * - * const client = new OpenAIClient(endpoint, credential); - * ``` - * - * #### Azure Active Directory - * - * ```js - * import { OpenAIClient } from "@azure/openai"; - * import { DefaultAzureCredential } from "@azure/identity"; - * - * const endpoint = ""; - * const credential = new DefaultAzureCredential(); - * - * const client = new OpenAIClient(endpoint, credential); - * ``` - */ -export class OpenAIClient { - private _client: OpenAIContext; - private _isAzure = false; - - /** - * Initializes an instance of OpenAIClient for use with an Azure OpenAI resource. - * @param endpoint - The URI for an Azure OpenAI resource, including protocol and hostname. - * For example: https://my-resource.openai.azure.com. - * @param credential - A key credential used to authenticate to an Azure OpenAI resource. - * @param options - The options for configuring the client. - * @remarks - * This constructor initializes an OpenAIClient object that can only be used with Azure OpenAI resources. - * To use OpenAIClient with a non-Azure OpenAI inference endpoint, use a constructor that accepts a non-Azure OpenAI API key instead. - */ - constructor(endpoint: string, credential: KeyCredential, options?: OpenAIClientOptions); - /** - * Initializes an instance of OpenAIClient for use with an Azure OpenAI resource. - * @param endpoint - The URI for an Azure OpenAI resource, including protocol and hostname. - * For example: https://my-resource.openai.azure.com. - * @param credential - A token credential used to authenticate with an Azure OpenAI resource. - * @param options - The options for configuring the client. - */ - constructor(endpoint: string, credential: TokenCredential, options?: OpenAIClientOptions); - /** - * Initializes an instance of OpenAIClient for use with the non-Azure OpenAI endpoint. - * @param openAiApiKey - The API key to use when connecting to the non-Azure OpenAI endpoint. - * @param options - The options for configuring the client. - * @remarks - * OpenAIClient objects initialized with this constructor can only be used with the non-Azure OpenAI inference endpoint. - * To use OpenAIClient with an Azure OpenAI resource, use a constructor that accepts a resource URI and Azure authentication credential instead. - */ - constructor(openAiApiKey: KeyCredential, options?: OpenAIClientOptions); - constructor( - endpointOrOpenAiKey: string | KeyCredential, - credOrOptions: KeyCredential | TokenCredential | OpenAIClientOptions = {}, - options: OpenAIClientOptions = {}, - ) { - let opts: OpenAIClientOptions; - let endpoint: string; - let cred: KeyCredential | TokenCredential; - if (isCred(credOrOptions)) { - endpoint = endpointOrOpenAiKey as string; - cred = credOrOptions; - opts = options; - this._isAzure = true; - } else { - endpoint = createOpenAIEndpoint(1); - cred = endpointOrOpenAiKey as KeyCredential; - const { credentials, ...restOpts } = credOrOptions; - opts = { - credentials: { - apiKeyHeaderName: credentials?.apiKeyHeaderName ?? "Authorization", - scopes: credentials?.scopes, - }, - ...restOpts, - }; - } - this._client = createOpenAI(endpoint, cred, { - ...opts, - ...(this._isAzure - ? {} - : { - additionalPolicies: [ - ...(opts.additionalPolicies ?? 
[]),
-              {
-                position: "perCall",
-                policy: nonAzurePolicy(),
-              },
-            ],
-          }),
-    });
-  }
-
-  private setModel(model: string, options: Record<string, any>): void {
-    if (!this._isAzure) {
-      options.model = model;
-    }
-  }
-
-  /**
-   * Returns the translation of an audio file.
-   * @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
-   * @param fileContent - The content of the audio file to translate.
-   * @param options - The options for this audio translation request.
-   * @returns The audio translation result.
-   */
-  async getAudioTranslation(
-    deploymentName: string,
-    fileContent: Uint8Array,
-    options?: GetAudioTranslationOptions,
-  ): Promise<AudioResultSimpleJson>;
-  /**
-   * Returns the translation of an audio file.
-   * @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
-   * @param fileContent - The content of the audio file to translate.
-   * @param format - The format of the result object. See {@link AudioResultFormat} for possible values.
-   * @param options - The options for this audio translation request.
-   * @returns The audio translation result.
-   */
-  async getAudioTranslation<Format extends AudioResultFormat>(
-    deploymentName: string,
-    fileContent: Uint8Array,
-    format: Format,
-    options?: GetAudioTranslationOptions,
-  ): Promise<AudioResult<Format>>;
-  // implementation
-  async getAudioTranslation<Format extends AudioResultFormat>(
-    deploymentName: string,
-    fileContent: Uint8Array,
-    formatOrOptions?: Format | GetAudioTranslationOptions,
-    inputOptions?: GetAudioTranslationOptions,
-  ): Promise<AudioResult<Format>> {
-    const options =
-      inputOptions ?? (typeof formatOrOptions === "string" ? {} : formatOrOptions ?? {});
-    const response_format = typeof formatOrOptions === "string" ? formatOrOptions : undefined;
-    this.setModel(deploymentName, options);
-    if (response_format === undefined) {
-      return getAudioTranslation(this._client, deploymentName, fileContent, options) as Promise<
-        AudioResult<Format>
-      >;
-    }
-    return getAudioTranslation(this._client, deploymentName, fileContent, response_format, options);
-  }
-
-  /**
-   * Returns the transcription of an audio file in a simple JSON format.
-   * @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
-   * @param fileContent - The content of the audio file to transcribe.
-   * @param options - The options for this audio transcription request.
-   * @returns The audio transcription result in a simple JSON format.
-   */
-  async getAudioTranscription(
-    deploymentName: string,
-    fileContent: Uint8Array,
-    options?: GetAudioTranscriptionOptions,
-  ): Promise<AudioResultSimpleJson>;
-  /**
-   * Returns the transcription of an audio file.
-   * @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
-   * @param fileContent - The content of the audio file to transcribe.
-   * @param format - The format of the result object. See {@link AudioResultFormat} for possible values.
-   * @param options - The options for this audio transcription request.
-   * @returns The audio transcription result in a format of your choice.
-   */
-  async getAudioTranscription<Format extends AudioResultFormat>(
-    deploymentName: string,
-    fileContent: Uint8Array,
-    format: Format,
-    options?: GetAudioTranscriptionOptions,
-  ): Promise<AudioResult<Format>>;
-  // implementation
-  async getAudioTranscription<Format extends AudioResultFormat>(
-    deploymentName: string,
-    fileContent: Uint8Array,
-    formatOrOptions?: Format | GetAudioTranscriptionOptions,
-    inputOptions?: GetAudioTranscriptionOptions,
-  ): Promise<AudioResult<Format>> {
-    const options =
-      inputOptions ?? (typeof formatOrOptions === "string" ? {} : formatOrOptions ?? {});
-    const response_format = typeof formatOrOptions === "string" ? formatOrOptions : undefined;
-    this.setModel(deploymentName, options);
-    if (response_format === undefined) {
-      return getAudioTranscription(this._client, deploymentName, fileContent, options) as Promise<
-        AudioResult<Format>
-      >;
-    }
-    return getAudioTranscription(
-      this._client,
-      deploymentName,
-      fileContent,
-      response_format,
-      options,
-    );
-  }
-
-  /**
-   * Gets completions for the provided input prompts.
-   * Completions support a wide variety of tasks and generate text that continues from or "completes"
-   * provided prompt data.
-   */
-  getCompletions(
-    deploymentName: string,
-    prompt: string[],
-    options: GetCompletionsOptions = { requestOptions: {} },
-  ): Promise<Completions> {
-    this.setModel(deploymentName, options);
-    return getCompletions(this._client, deploymentName, prompt, options);
-  }
-
-  /**
-   * Lists the completions tokens as they become available for a given prompt.
-   * @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
-   * @param prompt - The prompt to use for this request.
-   * @param options - The completions options for this completions request.
-   * @returns An asynchronous iterable of completions tokens.
-   */
-  streamCompletions(
-    deploymentName: string,
-    prompt: string[],
-    options: GetCompletionsOptions = {},
-  ): Promise<EventStream<Omit<Completions, "usage">>> {
-    this.setModel(deploymentName, options);
-    return streamCompletions(this._client, deploymentName, prompt, options);
-  }
-
-  /**
-   * Gets chat completions for the provided chat messages.
-   * Completions support a wide variety of tasks and generate text that continues from or "completes"
-   * provided prompt data.
-   */
-  getChatCompletions(
-    deploymentName: string,
-    messages: ChatRequestMessageUnion[],
-    options: GetChatCompletionsOptions = { requestOptions: {} },
-  ): Promise<ChatCompletions> {
-    this.setModel(deploymentName, options);
-    return getChatCompletions(this._client, deploymentName, messages, options);
-  }
-
-  /**
-   * Lists the chat completions tokens as they become available for a chat context.
-   * @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
-   * @param messages - The chat context messages to use for this request.
-   * @param options - The chat completions options for this chat completions request.
-   * @returns An asynchronous iterable of chat completions tokens.
-   */
-  streamChatCompletions(
-    deploymentName: string,
-    messages: ChatRequestMessageUnion[],
-    options: GetChatCompletionsOptions = { requestOptions: {} },
-  ): Promise<EventStream<ChatCompletions>> {
-    this.setModel(deploymentName, options);
-    return streamChatCompletions(this._client, deploymentName, messages, options);
-  }
-
-  /** Creates an image given a prompt. */
-  getImages(
-    deploymentName: string,
-    prompt: string,
-    options: GetImagesOptions = { requestOptions: {} },
-  ): Promise<ImageGenerations> {
-    this.setModel(deploymentName, options);
-    const { abortSignal, onResponse, requestOptions, tracingOptions, ...rest } = options;
-    return getImageGenerations(this._client, deploymentName, prompt, rest);
-  }
-
-  /** Return the embeddings for a given prompt. */
-  getEmbeddings(
-    deploymentName: string,
-    input: string[],
-    options: GetEmbeddingsOptions = { requestOptions: {} },
-  ): Promise<Embeddings> {
-    this.setModel(deploymentName, options);
-    return getEmbeddings(this._client, deploymentName, input, options);
-  }
-}
diff --git a/sdk/openai/openai/src/OpenAIKeyCredential.ts b/sdk/openai/openai/src/OpenAIKeyCredential.ts
deleted file mode 100644
index faf0d7a62ab9..000000000000
--- a/sdk/openai/openai/src/OpenAIKeyCredential.ts
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-import { KeyCredential } from "@azure/core-auth";
-
-/**
- * The OpenAIKeyCredential class represents an OpenAI API key
- * and is used to authenticate into an OpenAI client for
- * an OpenAI endpoint.
- */
-export class OpenAIKeyCredential implements KeyCredential {
-  private _key: string;
-
-  /**
-   * Create an instance of an AzureKeyCredential for use
-   * with a service client.
-   *
-   * @param key - The initial value of the key to use in authentication
-   */
-  constructor(key: string) {
-    if (!key) {
-      throw new Error("key must be a non-empty string");
-    }
-
-    this._key = createKey(key);
-  }
-
-  /**
-   * The value of the key to be used in authentication
-   */
-  public get key(): string {
-    return this._key;
-  }
-
-  /**
-   * Change the value of the key.
-   *
-   * Updates will take effect upon the next request after
-   * updating the key value.
-   *
-   * @param newKey - The new key value to be used
-   */
-  public update(newKey: string): void {
-    this._key = createKey(newKey);
-  }
-}
-
-function createKey(key: string): string {
-  return key.startsWith("Bearer ") ? key : `Bearer ${key}`;
-}
diff --git a/sdk/openai/openai/src/api/OpenAIContext.ts b/sdk/openai/openai/src/api/OpenAIContext.ts
deleted file mode 100644
index 8a3b4eaae998..000000000000
--- a/sdk/openai/openai/src/api/OpenAIContext.ts
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-import { TokenCredential, KeyCredential } from "@azure/core-auth";
-import { ClientOptions } from "@azure-rest/core-client";
-import { OpenAIContext } from "../rest/index.js";
-import getClient from "../rest/index.js";
-
-/** Options for OpenAI Client */
-export interface OpenAIClientOptions extends ClientOptions {}
-
-export { OpenAIContext } from "../rest/index.js";
-
-export function createOpenAI(
-  endpoint: string,
-  credential: KeyCredential | TokenCredential,
-  options: OpenAIClientOptions = {},
-): OpenAIContext {
-  const clientContext = getClient(endpoint, credential, options);
-  return clientContext;
-}
diff --git a/sdk/openai/openai/src/api/assistantsContext.ts b/sdk/openai/openai/src/api/assistantsContext.ts
new file mode 100644
index 000000000000..100848abb40a
--- /dev/null
+++ b/sdk/openai/openai/src/api/assistantsContext.ts
@@ -0,0 +1,22 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+import { TokenCredential, KeyCredential } from "@azure/core-auth";
+import { ClientOptions } from "@azure-rest/core-client";
+import { AssistantsContext } from "../rest/index.js";
+import getClient from "../rest/index.js";
+
+/** Optional parameters for the client. */
+export interface AssistantsClientOptions extends ClientOptions {}
+
+export { AssistantsContext } from "../rest/index.js";
+
+/** Azure OpenAI APIs for Assistants. */
+export function createAssistants(
+  endpointParam: string,
+  credential: KeyCredential | TokenCredential,
+  options: AssistantsClientOptions = {},
+): AssistantsContext {
+  const clientContext = getClient(endpointParam, credential, options);
+  return clientContext;
+}
diff --git a/sdk/openai/openai/src/api/getSSEs.browser.ts b/sdk/openai/openai/src/api/getSSEs.browser.ts
deleted file mode 100644
index b2733f49e2c7..000000000000
--- a/sdk/openai/openai/src/api/getSSEs.browser.ts
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-import { StreamableMethod } from "@azure-rest/core-client";
-import { wrapError } from "./util.js";
-import { streamToText } from "./readableStreamUtils.js";
-
-export async function getStream<TResponse>(
-  response: StreamableMethod<TResponse>,
-): Promise<ReadableStream<Uint8Array>> {
-  const { body, status } = await response.asBrowserStream();
-  if (status !== "200" && body !== undefined) {
-    const text = await streamToText(body);
-    throw wrapError(() => JSON.parse(text).error, "Error parsing response body");
-  }
-
-  if (!body) throw new Error("No stream found in response. Did you enable the stream option?");
-  return body;
-}
diff --git a/sdk/openai/openai/src/api/getSSEs.ts b/sdk/openai/openai/src/api/getSSEs.ts
deleted file mode 100644
index 637bde913444..000000000000
--- a/sdk/openai/openai/src/api/getSSEs.ts
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-import { StreamableMethod } from "@azure-rest/core-client";
-import { RestError } from "@azure/core-rest-pipeline";
-import { wrapError } from "./util.js";
-import { IncomingMessage } from "http";
-
-export async function getStream<TResponse>(
-  response: StreamableMethod<TResponse>,
-): Promise<IncomingMessage> {
-  const { body, status } = await response.asNodeStream();
-  if (status !== "200" && body !== undefined) {
-    const text = await streamToText(body);
-    throw wrapError(() => JSON.parse(text).error, "Error parsing response body");
-  }
-
-  if (!body) throw new Error("No stream found in response. Did you enable the stream option?");
-  return body as IncomingMessage;
-}
-
-function streamToText(stream: NodeJS.ReadableStream): Promise<string> {
-  return new Promise<string>((resolve, reject) => {
-    const buffer: Buffer[] = [];
-
-    stream.on("data", (chunk) => {
-      if (Buffer.isBuffer(chunk)) {
-        buffer.push(chunk);
-      } else {
-        buffer.push(Buffer.from(chunk));
-      }
-    });
-    stream.on("end", () => {
-      resolve(Buffer.concat(buffer).toString("utf8"));
-    });
-    stream.on("error", (e) => {
-      if (e && e?.name === "AbortError") {
-        reject(e);
-      } else {
-        reject(
-          new RestError(`Error reading response as text: ${e.message}`, {
-            code: RestError.PARSE_ERROR,
-          }),
-        );
-      }
-    });
-  });
-}
diff --git a/sdk/openai/openai/src/api/index.ts b/sdk/openai/openai/src/api/index.ts
index 32912ac02219..6a2b1dd5728b 100644
--- a/sdk/openai/openai/src/api/index.ts
+++ b/sdk/openai/openai/src/api/index.ts
@@ -1,15 +1,50 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT license.
-export { createOpenAI, OpenAIClientOptions, OpenAIContext } from "./OpenAIContext.js"; export { - getAudioTranscription, - getAudioTranslation, - getCompletions, - getChatCompletions, - streamChatCompletions, - streamCompletions, - getImageGenerations, - getEmbeddings, + createAssistants, + AssistantsClientOptions, + AssistantsContext, +} from "./assistantsContext.js"; +export { + createAssistant, + listAssistants, + getAssistant, + updateAssistant, + deleteAssistant, + createThread, + getThread, + updateThread, + deleteThread, + createMessage, + listMessages, + getMessage, + updateMessage, + createRun, + listRuns, + getRun, + updateRun, + submitToolOutputsToRun, + cancelRun, + createThreadAndRun, + getRunStep, + listRunSteps, + listFiles, + uploadFile, + deleteFile, + getFile, + getFileContent, + listVectorStores, + createVectorStore, + getVectorStore, + modifyVectorStore, + deleteVectorStore, + listVectorStoreFiles, + createVectorStoreFile, + getVectorStoreFile, + deleteVectorStoreFile, + createVectorStoreFileBatch, + getVectorStoreFileBatch, + cancelVectorStoreFileBatch, + listVectorStoreFileBatchFiles, } from "./operations.js"; -export { isOpenAIError } from "./util.js"; diff --git a/sdk/openai/openai/src/api/oaiSse.ts b/sdk/openai/openai/src/api/oaiSse.ts deleted file mode 100644 index 4434f9ddbbd3..000000000000 --- a/sdk/openai/openai/src/api/oaiSse.ts +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { StreamableMethod } from "@azure-rest/core-client"; -import { getStream } from "./getSSEs.js"; -import { wrapError } from "./util.js"; -import { EventStream } from "../models/models.js"; -import { EventMessage, createSseStream } from "@azure/core-sse"; -import { polyfillStream } from "./readableStreamUtils.js"; - -export async function getOaiSSEs>( - response: StreamableMethod, - toEvent: (obj: O) => TEvent, -): Promise> { - const stringStream = await getStream(response); - const eventStream = createSseStream(stringStream); - const jsonParser = new TransformStream({ - transform: async (chunk, controller) => { - if (chunk.data === "[DONE]") { - return; - } - controller.enqueue( - toEvent( - wrapError( - () => JSON.parse(chunk.data), - "Error parsing an event. See 'cause' for more details", - ), - ), - ); - }, - }); - /** TODO: remove these polyfills once all supported runtimes support them */ - return polyfillStream(eventStream.pipeThrough(jsonParser)); -} diff --git a/sdk/openai/openai/src/api/operations.ts b/sdk/openai/openai/src/api/operations.ts index 0b2883f03fbc..c6c388baa6e9 100644 --- a/sdk/openai/openai/src/api/operations.ts +++ b/sdk/openai/openai/src/api/operations.ts @@ -2,682 +2,2905 @@ // Licensed under the MIT license. 
import { - CompletionsOptions, - Completions, - ChatCompletionsOptions, - ChatCompletions, - ImageGenerationOptions, - ImageGenerations, - EmbeddingsOptions, - Embeddings, - ChatRequestMessageUnion, - EventStream, - ContentFilterResultsForChoice, - ContentFilterResultDetailsForPrompt, - ContentFilterResultsForPrompt, + createToolResourcesOptionsSerializer, + updateToolResourcesOptionsSerializer, + assistantThreadCreationOptionsSerializer, + threadMessageOptionsSerializer, + messageAttachmentSerializer, + threadMessageSerializer, + truncationObjectSerializer, + toolOutputSerializer, + vectorStoreExpirationPolicySerializer, + AssistantCreationOptions, + Assistant, + OpenAIPageableListOfAssistant, + UpdateAssistantOptions, + AssistantDeletionStatus, + AssistantThreadCreationOptions, + ThreadMessageOptions, + MessageRole, + AssistantThread, + UpdateAssistantThreadOptions, + ThreadDeletionStatus, + ThreadMessage, + MessageStatus, + MessageIncompleteDetailsReason, + OpenAIPageableListOfThreadMessage, + CreateRunOptions, + TruncationStrategy, + ThreadRun, + RunStatus, + IncompleteRunDetails, + OpenAIPageableListOfThreadRun, + ToolOutput, + CreateAndRunThreadOptions, + RunStep, + RunStepType, + RunStepStatus, + RunStepErrorCode, + OpenAIPageableListOfRunStep, + FilePurpose, + FileListResponse, + OpenAIFile, + FileState, + FileDeletionStatus, + OpenAIPageableListOfVectorStore, + VectorStore, + VectorStoreStatus, + VectorStoreExpirationPolicyAnchor, + VectorStoreOptions, + VectorStoreUpdateOptions, + VectorStoreDeletionStatus, + OpenAIPageableListOfVectorStoreFile, + VectorStoreFile, + VectorStoreFileStatus, + VectorStoreFileErrorCode, + VectorStoreFileDeletionStatus, + VectorStoreFileBatch, + VectorStoreFileBatchStatus, } from "../models/models.js"; import { - serializeChatRequestMessageUnion, - serializeAzureChatExtensionConfigurationUnion, -} from "../utils/serializeUtil.js"; + deserializeMessageContentUnion, + deserializeRequiredActionUnion, + deserializeRunStepDetailsUnion, +} from "../utils/deserializeUtil.js"; import { - GetChatCompletions200Response, - GetChatCompletionsDefaultResponse, - GetCompletions200Response, - GetCompletionsDefaultResponse, - GetEmbeddings200Response, - GetEmbeddingsDefaultResponse, - GetImageGenerations200Response, - GetImageGenerationsDefaultResponse, - isUnexpected, - OpenAIContext as Client, - ContentFilterResultsForChoiceOutput, - ContentFilterResultDetailsForPromptOutput, - ContentFilterResultsForPromptOutput, - ChatCompletionsOutput, - CompletionsOutput, + AssistantsContext as Client, + CancelRun200Response, + CancelVectorStoreFileBatch200Response, + CreateAssistant200Response, + CreateMessage200Response, + CreateRun200Response, + CreateThread200Response, + CreateThreadAndRun200Response, + CreateVectorStore200Response, + CreateVectorStoreFile200Response, + CreateVectorStoreFileBatch200Response, + DeleteAssistant200Response, + DeleteFile200Response, + DeleteThread200Response, + DeleteVectorStore200Response, + DeleteVectorStoreFile200Response, + GetAssistant200Response, + GetFile200Response, + GetFileContent200Response, + GetMessage200Response, + GetRun200Response, + GetRunStep200Response, + GetThread200Response, + GetVectorStore200Response, + GetVectorStoreFile200Response, + GetVectorStoreFileBatch200Response, + ListAssistants200Response, + ListFiles200Response, + ListMessages200Response, + ListRuns200Response, + ListRunSteps200Response, + ListVectorStoreFileBatchFiles200Response, + ListVectorStoreFiles200Response, + ListVectorStores200Response, + 
ModifyVectorStore200Response, + SubmitToolOutputsToRun200Response, + UpdateAssistant200Response, + UpdateMessage200Response, + UpdateRun200Response, + UpdateThread200Response, + UploadFile200Response, } from "../rest/index.js"; import { StreamableMethod, operationOptionsToRequestParameters, - ErrorModel, + createRestError, } from "@azure-rest/core-client"; +import { uint8ArrayToString, stringToUint8Array } from "@azure/core-util"; +import { serializeRecord } from "../helpers/serializerHelpers.js"; import { - GetCompletionsOptions, - GetChatCompletionsOptions, - GetEmbeddingsOptions, - GetImagesOptions, - GetImageGenerationsOptions, - GeneratedGetChatCompletionsOptions, + CreateAssistantOptionalParams, + ListAssistantsOptionalParams, + GetAssistantOptionalParams, + UpdateAssistantOptionalParams, + DeleteAssistantOptionalParams, + CreateThreadOptionalParams, + GetThreadOptionalParams, + UpdateThreadOptionalParams, + DeleteThreadOptionalParams, + CreateMessageOptionalParams, + ListMessagesOptionalParams, + GetMessageOptionalParams, + UpdateMessageOptionalParams, + CreateRunOptionalParams, + ListRunsOptionalParams, + GetRunOptionalParams, + UpdateRunOptionalParams, + SubmitToolOutputsToRunOptionalParams, + CancelRunOptionalParams, + CreateThreadAndRunOptionalParams, + GetRunStepOptionalParams, + ListRunStepsOptionalParams, + ListFilesOptionalParams, + UploadFileOptionalParams, + DeleteFileOptionalParams, + GetFileOptionalParams, + GetFileContentOptionalParams, + ListVectorStoresOptionalParams, + CreateVectorStoreOptionalParams, + GetVectorStoreOptionalParams, + ModifyVectorStoreOptionalParams, + DeleteVectorStoreOptionalParams, + ListVectorStoreFilesOptionalParams, + CreateVectorStoreFileOptionalParams, + GetVectorStoreFileOptionalParams, + DeleteVectorStoreFileOptionalParams, + CreateVectorStoreFileBatchOptionalParams, + GetVectorStoreFileBatchOptionalParams, + CancelVectorStoreFileBatchOptionalParams, + ListVectorStoreFileBatchFilesOptionalParams, } from "../models/options.js"; -import { getOaiSSEs } from "./oaiSse.js"; -import { createFile } from "@azure/core-rest-pipeline"; -import { - GetAudioTranscriptionOptions, - AudioResultSimpleJson, - AudioResultFormat, - AudioResult, - GetAudioTranslationOptions, -} from "../models/audio.js"; -import { snakeCaseKeys, camelCaseKeys, createOpenAIError } from "./util.js"; -/** - * Returns the transcription of an audio file in a simple JSON format. - * @param context - The context containing the client to use for this request. - * @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request. - * @param fileContent - The content of the audio file to transcribe. - * @param options - The options for this audio transcription request. - * @returns The audio transcription result in a simple JSON format. - */ -export async function getAudioTranscription( +export function _createAssistantSend( context: Client, - deploymentName: string, - fileContent: Uint8Array, - options?: GetAudioTranscriptionOptions, -): Promise; -/** - * Returns the transcription of an audio file. - * @param context - The context containing the client to use for this request. - * @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request. - * @param fileContent - The content of the audio file to transcribe. - * @param format - The format of the result object. See {@link AudioResultFormat} for possible values. 
- * @param options - The options for this audio transcription request. - * @returns The audio transcription result in a format of your choice. - */ -export async function getAudioTranscription( - context: Client, - deploymentName: string, - fileContent: Uint8Array, - format: Format, - options?: GetAudioTranscriptionOptions, -): Promise>; -// implementation -export async function getAudioTranscription( - context: Client, - deploymentName: string, - fileContent: Uint8Array, - formatOrOptions?: Format | GetAudioTranscriptionOptions, - inputOptions?: GetAudioTranscriptionOptions, -): Promise> { - const options = - inputOptions ?? (typeof formatOrOptions === "string" ? {} : formatOrOptions ?? {}); - const response_format = typeof formatOrOptions === "string" ? formatOrOptions : undefined; - const { abortSignal, onResponse, requestOptions, tracingOptions, ...rest } = options; - const { body, status } = await context - .pathUnchecked("deployments/{deploymentName}/audio/transcriptions", deploymentName) + body: AssistantCreationOptions, + options: CreateAssistantOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/assistants") .post({ - ...operationOptionsToRequestParameters({ - abortSignal, - onResponse, - tracingOptions, - requestOptions, - }), - contentType: "multipart/form-data", + ...operationOptionsToRequestParameters(options), body: { - ...snakeCaseKeys(rest), - file: createFile(fileContent, "placeholder.wav"), - ...(response_format ? { response_format } : {}), + model: body["model"], + name: body["name"], + description: body["description"], + instructions: body["instructions"], + tools: body["tools"], + tool_resources: !body.toolResources + ? body.toolResources + : createToolResourcesOptionsSerializer(body.toolResources), + temperature: body["temperature"], + top_p: body["topP"], + response_format: body["responseFormat"], + metadata: !body.metadata + ? body.metadata + : (serializeRecord(body.metadata as any) as any), }, }); - if (status !== "200") { - throw body.error; +} + +export async function _createAssistantDeserialize( + result: CreateAssistant200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); } - return response_format !== "verbose_json" - ? body - : (camelCaseKeys(body) as unknown as AudioResult); + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + name: result.body["name"], + description: result.body["description"], + model: result.body["model"], + instructions: result.body["instructions"], + tools: result.body["tools"], + toolResources: + result.body.tool_resources === null + ? null + : { + codeInterpreter: !result.body.tool_resources.code_interpreter + ? undefined + : { + fileIds: + result.body.tool_resources.code_interpreter?.["file_ids"], + }, + fileSearch: !result.body.tool_resources.file_search + ? undefined + : { + vectorStoreIds: + result.body.tool_resources.file_search?.[ + "vector_store_ids" + ], + }, + }, + temperature: result.body["temperature"], + topP: result.body["top_p"], + responseFormat: result.body["response_format"], + metadata: result.body["metadata"], + }; } -/** - * Returns the translation of an audio file. - * @param context - The context containing the client to use for this request. - * @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request. - * @param fileContent - The content of the audio file to translate. 
- * @param options - The options for this audio translation request. - * @returns The audio translation result. - */ -export async function getAudioTranslation( +/** Creates a new assistant. */ +export async function createAssistant( context: Client, - deploymentName: string, - fileContent: Uint8Array, - options?: GetAudioTranslationOptions, -): Promise; -/** - * Returns the translation of an audio file. - * @param context - The context containing the client to use for this request. - * @param deploymentName - The name of the model deployment (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request. - * @param fileContent - The content of the audio file to translate. - * @param format - The format of the result object. See {@link AudioResultFormat} for possible values. - * @param options - The options for this audio translation request. - * @returns The audio translation result. - */ -export async function getAudioTranslation( - context: Client, - deploymentName: string, - fileContent: Uint8Array, - format: Format, - options?: GetAudioTranslationOptions, -): Promise>; -// implementation -export async function getAudioTranslation( - context: Client, - deploymentName: string, - fileContent: Uint8Array, - formatOrOptions?: Format | GetAudioTranslationOptions, - inputOptions?: GetAudioTranslationOptions, -): Promise> { - const options = - inputOptions ?? (typeof formatOrOptions === "string" ? {} : formatOrOptions ?? {}); - const response_format = typeof formatOrOptions === "string" ? formatOrOptions : undefined; - const { abortSignal, onResponse, requestOptions, tracingOptions, ...rest } = options; - const { body, status } = await context - .pathUnchecked("deployments/{deploymentName}/audio/translations", deploymentName) - .post({ - ...operationOptionsToRequestParameters({ - abortSignal, - onResponse, - tracingOptions, - requestOptions, - }), - contentType: "multipart/form-data", - body: { - ...snakeCaseKeys(rest), - file: createFile(fileContent, "placeholder.wav"), - ...(response_format ? { response_format } : {}), + body: AssistantCreationOptions, + options: CreateAssistantOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createAssistantSend(context, body, options); + return _createAssistantDeserialize(result); +} + +export function _listAssistantsSend( + context: Client, + options: ListAssistantsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/assistants") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + limit: options?.limit, + order: options?.order, + after: options?.after, + before: options?.before, }, }); - if (status !== "200") { - throw body.error; - } - return response_format !== "verbose_json" - ? 
body - : (camelCaseKeys(body) as unknown as AudioResult); -} - -export function _getCompletionsSend( - context: Client, - deploymentId: string, - body: CompletionsOptions, - options: GetCompletionsOptions = { requestOptions: {} }, -): StreamableMethod { - return context.path("/deployments/{deploymentId}/completions", deploymentId).post({ - ...operationOptionsToRequestParameters(options), - body: { - prompt: body["prompt"], - max_tokens: body["maxTokens"], - temperature: body["temperature"], - top_p: body["topP"], - logit_bias: body["logitBias"], - user: body["user"], - n: body["n"], - logprobs: body["logprobs"], - suffix: body["suffix"], - echo: body["echo"], - stop: body["stop"], - presence_penalty: body["presencePenalty"], - frequency_penalty: body["frequencyPenalty"], - best_of: body["bestOf"], - stream: body["stream"], - model: body["model"], - }, - }); } -export async function _getCompletionsDeserialize( - result: GetCompletions200Response | GetCompletionsDefaultResponse, -): Promise { - if (isUnexpected(result)) { - throw createOpenAIError(result); +export async function _listAssistantsDeserialize( + result: ListAssistants200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); } - return getCompletionsResult(result.body); -} - -export function getCompletionsResult( - body: CompletionsOutput & ContentFilterResultsForPromptX, -): Completions { - const { created, choices, prompt_filter_results, prompt_annotations, ...rest } = body; return { - ...camelCaseKeys(rest), - created: new Date(created), - ...{ - promptFilterResults: getContentFilterResultsForPrompt({ - prompt_filter_results, - prompt_annotations, - }), - }, - choices: choices.map(({ content_filter_results, ...choice }) => ({ - ...camelCaseKeys(choice), - ...(!content_filter_results - ? {} - : { - contentFilterResults: parseContentFilterResultsForChoiceOutput(content_filter_results), - }), + object: result.body["object"], + data: result.body["data"].map((p) => ({ + id: p["id"], + object: p["object"], + createdAt: new Date(p["created_at"]), + name: p["name"], + description: p["description"], + model: p["model"], + instructions: p["instructions"], + tools: p["tools"], + toolResources: + p.tool_resources === null + ? null + : { + codeInterpreter: !p.tool_resources.code_interpreter + ? undefined + : { fileIds: p.tool_resources.code_interpreter?.["file_ids"] }, + fileSearch: !p.tool_resources.file_search + ? undefined + : { + vectorStoreIds: + p.tool_resources.file_search?.["vector_store_ids"], + }, + }, + temperature: p["temperature"], + topP: p["top_p"], + responseFormat: p["response_format"], + metadata: p["metadata"], })), + firstId: result.body["first_id"], + lastId: result.body["last_id"], + hasMore: result.body["has_more"], }; } -/** - * Gets completions for the provided input prompts. - * Completions support a wide variety of tasks and generate text that continues from or "completes" - * provided prompt data. - */ -export async function getCompletions( - context: Client, - deploymentId: string, - prompt: string[], - options: GetCompletionsOptions = { requestOptions: {} }, -): Promise { - const { abortSignal, onResponse, requestOptions, tracingOptions, ...rest } = options; - const result = await _getCompletionsSend( - context, - deploymentId, - { prompt, ...rest }, - { abortSignal, onResponse, requestOptions, tracingOptions }, - ); - return _getCompletionsDeserialize(result); +/** Gets a list of assistants that were previously created. 
*/ +export async function listAssistants( + context: Client, + options: ListAssistantsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _listAssistantsSend(context, options); + return _listAssistantsDeserialize(result); } -export function streamCompletions( +export function _getAssistantSend( context: Client, - deploymentName: string, - prompt: string[], - options: GetCompletionsOptions = { requestOptions: {} }, -): Promise>> { - const { abortSignal, onResponse, requestOptions, tracingOptions, ...rest } = options; - const response = _getCompletionsSend( - context, - deploymentName, - { - prompt, - ...rest, - stream: true, - }, - { abortSignal, onResponse, requestOptions, tracingOptions }, - ); - return getOaiSSEs(response, getCompletionsResult); -} - -export function _getChatCompletionsSend( - context: Client, - deploymentId: string, - body: ChatCompletionsOptions, - options: GeneratedGetChatCompletionsOptions = { requestOptions: {} }, -): StreamableMethod { - return context.path("/deployments/{deploymentId}/chat/completions", deploymentId).post({ - ...operationOptionsToRequestParameters(options), - body: { - model: body["model"], - stream: body["stream"], - max_tokens: body["maxTokens"], - temperature: body["temperature"], - top_p: body["topP"], - logit_bias: body["logitBias"], - user: body["user"], - n: body["n"], - stop: body["stop"], - presence_penalty: body["presencePenalty"], - frequency_penalty: body["frequencyPenalty"], - data_sources: - body["dataSources"] === undefined - ? body["dataSources"] - : body["dataSources"].map((p) => serializeAzureChatExtensionConfigurationUnion(p)), - enhancements: !body.enhancements - ? undefined - : { - grounding: !body.enhancements?.grounding - ? undefined - : { enabled: body.enhancements?.grounding?.["enabled"] }, - ocr: !body.enhancements?.ocr - ? undefined - : { enabled: body.enhancements?.ocr?.["enabled"] }, - }, - seed: body["seed"], - logprobs: body["logprobs"], - top_logprobs: body["topLogprobs"], - response_format: !body.responseFormat ? undefined : { type: body.responseFormat?.["type"] }, - tool_choice: body["toolChoice"], - tools: body["tools"], - functions: - body["functions"] === undefined - ? 
body["functions"] - : body["functions"].map((p) => ({ - name: p["name"], - description: p["description"], - parameters: p["parameters"], - })), - function_call: body["functionCall"], - messages: body["messages"].map((p) => serializeChatRequestMessageUnion(p)), - }, - }); + assistantId: string, + options: GetAssistantOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/assistants/{assistantId}", assistantId) + .get({ ...operationOptionsToRequestParameters(options) }); } -export async function _getChatCompletionsDeserialize( - result: GetChatCompletions200Response | GetChatCompletionsDefaultResponse, -): Promise { - if (isUnexpected(result)) { - throw createOpenAIError(result); +export async function _getAssistantDeserialize( + result: GetAssistant200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); } - return getChatCompletionsResult(result.body); -} -export function getChatCompletionsResult( - body: ChatCompletionsOutput & ContentFilterResultsForPromptX, -): ChatCompletions { - const { created, choices, prompt_filter_results, prompt_annotations, usage, ...rest } = body; return { - ...camelCaseKeys(rest), - created: new Date(created), - ...{ - promptFilterResults: getContentFilterResultsForPrompt({ - prompt_filter_results, - prompt_annotations, - }), - }, - ...(!usage - ? {} - : { - usage: { - completionTokens: usage["completion_tokens"], - promptTokens: usage["prompt_tokens"], - totalTokens: usage["total_tokens"], + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + name: result.body["name"], + description: result.body["description"], + model: result.body["model"], + instructions: result.body["instructions"], + tools: result.body["tools"], + toolResources: + result.body.tool_resources === null + ? null + : { + codeInterpreter: !result.body.tool_resources.code_interpreter + ? undefined + : { + fileIds: + result.body.tool_resources.code_interpreter?.["file_ids"], + }, + fileSearch: !result.body.tool_resources.file_search + ? undefined + : { + vectorStoreIds: + result.body.tool_resources.file_search?.[ + "vector_store_ids" + ], + }, }, - }), - choices: !choices - ? [] - : choices.map(({ content_filter_results, ...choice }) => ({ - ...camelCaseKeys(choice), - ...(!content_filter_results - ? {} - : { - contentFilterResults: - parseContentFilterResultsForChoiceOutput(content_filter_results), - }), - })), + temperature: result.body["temperature"], + topP: result.body["top_p"], + responseFormat: result.body["response_format"], + metadata: result.body["metadata"], }; } -/** - * Gets chat completions for the provided chat messages. - * Completions support a wide variety of tasks and generate text that continues from or "completes" - * provided prompt data. 
- */ -export async function getChatCompletions( - context: Client, - deploymentName: string, - messages: ChatRequestMessageUnion[], - options: GetChatCompletionsOptions = { requestOptions: {} }, -): Promise { - const result = await _getChatCompletionsSendX(context, deploymentName, messages, options); - return _getChatCompletionsDeserialize(result); -} - -function _getChatCompletionsSendX( - context: Client, - deploymentName: string, - messages: ChatRequestMessageUnion[], - options: GetChatCompletionsOptions & { stream?: boolean } = { requestOptions: {} }, -): StreamableMethod { - const { - azureExtensionOptions, - abortSignal, - onResponse, - requestOptions, - tracingOptions, - ...rest - } = options; - const coreOptions = { - abortSignal, - onResponse, - requestOptions, - tracingOptions, - }; - const azure = { - ...(!azureExtensionOptions?.extensions - ? {} - : { dataSources: azureExtensionOptions.extensions }), - ...(!azureExtensionOptions?.enhancements - ? {} - : { enhancements: azureExtensionOptions.enhancements }), - }; - return _getChatCompletionsSend( - context, - deploymentName, - { messages, ...rest, ...azure }, - coreOptions, - ); +/** Retrieves an existing assistant. */ +export async function getAssistant( + context: Client, + assistantId: string, + options: GetAssistantOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getAssistantSend(context, assistantId, options); + return _getAssistantDeserialize(result); } -export function streamChatCompletions( - context: Client, - deploymentName: string, - messages: ChatRequestMessageUnion[], - options: GetChatCompletionsOptions = { requestOptions: {} }, -): Promise> { - const response = _getChatCompletionsSendX(context, deploymentName, messages, { - ...options, - stream: true, - }); - return getOaiSSEs(response, getChatCompletionsResult); -} - -export function _getImageGenerationsSend( - context: Client, - deploymentId: string, - body: ImageGenerationOptions, - options: GetImageGenerationsOptions = { requestOptions: {} }, -): StreamableMethod { - return context.path("/deployments/{deploymentId}/images/generations", deploymentId).post({ - ...operationOptionsToRequestParameters(options), - body: { - model: body["model"], - prompt: body["prompt"], - n: body["n"], - size: body["size"], - response_format: body["responseFormat"], - quality: body["quality"], - style: body["style"], - user: body["user"], - }, - }); +export function _updateAssistantSend( + context: Client, + assistantId: string, + body: UpdateAssistantOptions, + options: UpdateAssistantOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/assistants/{assistantId}", assistantId) + .post({ + ...operationOptionsToRequestParameters(options), + body: { + model: body["model"], + name: body["name"], + description: body["description"], + instructions: body["instructions"], + tools: body["tools"], + tool_resources: !body.toolResources + ? body.toolResources + : updateToolResourcesOptionsSerializer(body.toolResources), + temperature: body["temperature"], + top_p: body["topP"], + response_format: body["responseFormat"], + metadata: !body.metadata + ? 
body.metadata + : (serializeRecord(body.metadata as any) as any), + }, + }); } -export async function _getImageGenerationsDeserialize( - result: GetImageGenerations200Response | GetImageGenerationsDefaultResponse, -): Promise { - if (isUnexpected(result)) { - throw createOpenAIError(result); +export async function _updateAssistantDeserialize( + result: UpdateAssistant200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); } return { - created: new Date(result.body["created"]), - data: result.body["data"].map((p) => ({ - url: p["url"], - base64Data: p["b64_json"], - contentFilterResults: !p.content_filter_results - ? undefined + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + name: result.body["name"], + description: result.body["description"], + model: result.body["model"], + instructions: result.body["instructions"], + tools: result.body["tools"], + toolResources: + result.body.tool_resources === null + ? null : { - sexual: !p.content_filter_results?.sexual + codeInterpreter: !result.body.tool_resources.code_interpreter ? undefined : { - severity: p.content_filter_results?.sexual?.["severity"], - filtered: p.content_filter_results?.sexual?.["filtered"], + fileIds: + result.body.tool_resources.code_interpreter?.["file_ids"], }, - violence: !p.content_filter_results?.violence + fileSearch: !result.body.tool_resources.file_search ? undefined : { - severity: p.content_filter_results?.violence?.["severity"], - filtered: p.content_filter_results?.violence?.["filtered"], - }, - hate: !p.content_filter_results?.hate - ? undefined - : { - severity: p.content_filter_results?.hate?.["severity"], - filtered: p.content_filter_results?.hate?.["filtered"], - }, - selfHarm: !p.content_filter_results?.self_harm - ? undefined - : { - severity: p.content_filter_results?.self_harm?.["severity"], - filtered: p.content_filter_results?.self_harm?.["filtered"], + vectorStoreIds: + result.body.tool_resources.file_search?.[ + "vector_store_ids" + ], }, }, - revisedPrompt: p["revised_prompt"], - promptFilterResults: !p.prompt_filter_results - ? undefined + temperature: result.body["temperature"], + topP: result.body["top_p"], + responseFormat: result.body["response_format"], + metadata: result.body["metadata"], + }; +} + +/** Modifies an existing assistant. */ +export async function updateAssistant( + context: Client, + assistantId: string, + body: UpdateAssistantOptions, + options: UpdateAssistantOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _updateAssistantSend( + context, + assistantId, + body, + options, + ); + return _updateAssistantDeserialize(result); +} + +export function _deleteAssistantSend( + context: Client, + assistantId: string, + options: DeleteAssistantOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/assistants/{assistantId}", assistantId) + .delete({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _deleteAssistantDeserialize( + result: DeleteAssistant200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + deleted: result.body["deleted"], + object: result.body["object"], + }; +} + +/** Deletes an assistant. 
*/ +export async function deleteAssistant( + context: Client, + assistantId: string, + options: DeleteAssistantOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteAssistantSend(context, assistantId, options); + return _deleteAssistantDeserialize(result); +} + +export function _createThreadSend( + context: Client, + body: AssistantThreadCreationOptions, + options: CreateThreadOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads") + .post({ + ...operationOptionsToRequestParameters(options), + body: { + messages: + body["messages"] === undefined + ? body["messages"] + : body["messages"].map(threadMessageOptionsSerializer), + tool_resources: !body.toolResources + ? body.toolResources + : createToolResourcesOptionsSerializer(body.toolResources), + metadata: !body.metadata + ? body.metadata + : (serializeRecord(body.metadata as any) as any), + }, + }); +} + +export async function _createThreadDeserialize( + result: CreateThread200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + toolResources: + result.body.tool_resources === null + ? null : { - sexual: !p.prompt_filter_results?.sexual + codeInterpreter: !result.body.tool_resources.code_interpreter ? undefined : { - severity: p.prompt_filter_results?.sexual?.["severity"], - filtered: p.prompt_filter_results?.sexual?.["filtered"], + fileIds: + result.body.tool_resources.code_interpreter?.["file_ids"], }, - violence: !p.prompt_filter_results?.violence + fileSearch: !result.body.tool_resources.file_search ? undefined : { - severity: p.prompt_filter_results?.violence?.["severity"], - filtered: p.prompt_filter_results?.violence?.["filtered"], + vectorStoreIds: + result.body.tool_resources.file_search?.[ + "vector_store_ids" + ], }, - hate: !p.prompt_filter_results?.hate + }, + metadata: result.body["metadata"], + }; +} + +/** Creates a new thread. Threads contain messages and can be run by assistants. */ +export async function createThread( + context: Client, + body: AssistantThreadCreationOptions, + options: CreateThreadOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createThreadSend(context, body, options); + return _createThreadDeserialize(result); +} + +export function _getThreadSend( + context: Client, + threadId: string, + options: GetThreadOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}", threadId) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _getThreadDeserialize( + result: GetThread200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + toolResources: + result.body.tool_resources === null + ? null + : { + codeInterpreter: !result.body.tool_resources.code_interpreter ? undefined : { - severity: p.prompt_filter_results?.hate?.["severity"], - filtered: p.prompt_filter_results?.hate?.["filtered"], + fileIds: + result.body.tool_resources.code_interpreter?.["file_ids"], }, - selfHarm: !p.prompt_filter_results?.self_harm + fileSearch: !result.body.tool_resources.file_search ? 
undefined : { - severity: p.prompt_filter_results?.self_harm?.["severity"], - filtered: p.prompt_filter_results?.self_harm?.["filtered"], + vectorStoreIds: + result.body.tool_resources.file_search?.[ + "vector_store_ids" + ], }, - profanity: !p.prompt_filter_results?.profanity + }, + metadata: result.body["metadata"], + }; +} + +/** Gets information about an existing thread. */ +export async function getThread( + context: Client, + threadId: string, + options: GetThreadOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getThreadSend(context, threadId, options); + return _getThreadDeserialize(result); +} + +export function _updateThreadSend( + context: Client, + threadId: string, + body: UpdateAssistantThreadOptions, + options: UpdateThreadOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}", threadId) + .post({ + ...operationOptionsToRequestParameters(options), + body: { + tool_resources: !body.toolResources + ? body.toolResources + : updateToolResourcesOptionsSerializer(body.toolResources), + metadata: !body.metadata + ? body.metadata + : (serializeRecord(body.metadata as any) as any), + }, + }); +} + +export async function _updateThreadDeserialize( + result: UpdateThread200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + toolResources: + result.body.tool_resources === null + ? null + : { + codeInterpreter: !result.body.tool_resources.code_interpreter ? undefined : { - filtered: p.prompt_filter_results?.profanity?.["filtered"], - detected: p.prompt_filter_results?.profanity?.["detected"], + fileIds: + result.body.tool_resources.code_interpreter?.["file_ids"], }, - jailbreak: !p.prompt_filter_results?.jailbreak + fileSearch: !result.body.tool_resources.file_search ? undefined : { - filtered: p.prompt_filter_results?.jailbreak?.["filtered"], - detected: p.prompt_filter_results?.jailbreak?.["detected"], + vectorStoreIds: + result.body.tool_resources.file_search?.[ + "vector_store_ids" + ], }, }, - })), + metadata: result.body["metadata"], + }; +} + +/** Modifies an existing thread. */ +export async function updateThread( + context: Client, + threadId: string, + body: UpdateAssistantThreadOptions, + options: UpdateThreadOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _updateThreadSend(context, threadId, body, options); + return _updateThreadDeserialize(result); +} + +export function _deleteThreadSend( + context: Client, + threadId: string, + options: DeleteThreadOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}", threadId) + .delete({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _deleteThreadDeserialize( + result: DeleteThread200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + deleted: result.body["deleted"], + object: result.body["object"], + }; +} + +/** Deletes an existing thread. 
*/ +export async function deleteThread( + context: Client, + threadId: string, + options: DeleteThreadOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteThreadSend(context, threadId, options); + return _deleteThreadDeserialize(result); +} + +export function _createMessageSend( + context: Client, + threadId: string, + threadMessageOptions: ThreadMessageOptions, + options: CreateMessageOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}/messages", threadId) + .post({ + ...operationOptionsToRequestParameters(options), + body: { + role: threadMessageOptions["role"], + content: threadMessageOptions["content"], + attachments: + threadMessageOptions["attachments"] === undefined || + threadMessageOptions["attachments"] === null + ? threadMessageOptions["attachments"] + : threadMessageOptions["attachments"].map( + messageAttachmentSerializer, + ), + metadata: !threadMessageOptions.metadata + ? threadMessageOptions.metadata + : (serializeRecord(threadMessageOptions.metadata as any) as any), + }, + }); +} + +export async function _createMessageDeserialize( + result: CreateMessage200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + threadId: result.body["thread_id"], + status: result.body["status"] as MessageStatus, + incompleteDetails: + result.body.incomplete_details === null + ? null + : { + reason: result.body.incomplete_details[ + "reason" + ] as MessageIncompleteDetailsReason, + }, + completedAt: + result.body["completed_at"] === null + ? null + : new Date(result.body["completed_at"]), + incompleteAt: + result.body["incomplete_at"] === null + ? null + : new Date(result.body["incomplete_at"]), + role: result.body["role"] as MessageRole, + content: result.body["content"].map((p) => + deserializeMessageContentUnion(p), + ), + assistantId: result.body["assistant_id"], + runId: result.body["run_id"], + attachments: + result.body["attachments"] === null + ? result.body["attachments"] + : result.body["attachments"].map((p) => ({ + fileId: p["file_id"], + tools: p["tools"], + })), + metadata: result.body["metadata"], }; } -/** Creates an image given a prompt. */ -export async function getImageGenerations( +/** Creates a new message on a specified thread. 
*/ +export async function createMessage( context: Client, - deploymentId: string, - prompt: string, - options: GetImagesOptions = { requestOptions: {} }, -): Promise { - const { abortSignal, onResponse, requestOptions, tracingOptions, ...rest } = options; - const result = await _getImageGenerationsSend( + threadId: string, + threadMessageOptions: ThreadMessageOptions, + options: CreateMessageOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createMessageSend( context, - deploymentId, - { prompt, ...rest }, - { abortSignal, onResponse, requestOptions, tracingOptions }, + threadId, + threadMessageOptions, + options, ); - return _getImageGenerationsDeserialize(result); -} - -export function _getEmbeddingsSend( - context: Client, - deploymentId: string, - body: EmbeddingsOptions, - options: GetEmbeddingsOptions = { requestOptions: {} }, -): StreamableMethod { - return context.path("/deployments/{deploymentId}/embeddings", deploymentId).post({ - ...operationOptionsToRequestParameters(options), - body: { - user: body["user"], - model: body["model"], - input: body["input"], - dimensions: body["dimensions"], - }, - }); + return _createMessageDeserialize(result); +} + +export function _listMessagesSend( + context: Client, + threadId: string, + options: ListMessagesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}/messages", threadId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + runId: options?.runId, + limit: options?.limit, + order: options?.order, + after: options?.after, + before: options?.before, + }, + }); } -export async function _getEmbeddingsDeserialize( - result: GetEmbeddings200Response | GetEmbeddingsDefaultResponse, -): Promise { - if (isUnexpected(result)) { - throw createOpenAIError(result); +export async function _listMessagesDeserialize( + result: ListMessages200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); } return { + object: result.body["object"], data: result.body["data"].map((p) => ({ - embedding: p["embedding"], - index: p["index"], + id: p["id"], + object: p["object"], + createdAt: new Date(p["created_at"]), + threadId: p["thread_id"], + status: p["status"] as MessageStatus, + incompleteDetails: + p.incomplete_details === null + ? null + : { + reason: p.incomplete_details[ + "reason" + ] as MessageIncompleteDetailsReason, + }, + completedAt: + p["completed_at"] === null ? null : new Date(p["completed_at"]), + incompleteAt: + p["incomplete_at"] === null ? null : new Date(p["incomplete_at"]), + role: p["role"] as MessageRole, + content: p["content"].map((p) => deserializeMessageContentUnion(p)), + assistantId: p["assistant_id"], + runId: p["run_id"], + attachments: + p["attachments"] === null + ? p["attachments"] + : p["attachments"].map((p) => ({ + fileId: p["file_id"], + tools: p["tools"], + })), + metadata: p["metadata"], })), - usage: { - promptTokens: result.body.usage["prompt_tokens"], - totalTokens: result.body.usage["total_tokens"], - }, + firstId: result.body["first_id"], + lastId: result.body["last_id"], + hasMore: result.body["has_more"], + }; +} + +/** Gets a list of messages that exist on a thread. 
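`createMessage` plus the cursor parameters on `listMessages` (`limit`, `order`, `after`, `before`) are enough to page through a thread. A sketch, assuming `"user"` and `"asc"` are valid `MessageRole` and order literals and that plain-string `content` is accepted; import path illustrative:

```ts
import type { Client } from "@azure-rest/core-client";
import { createMessage, listMessages } from "./operations.js"; // path illustrative

// Add a user message, then walk the thread with the cursor parameters.
async function dumpThread(context: Client, threadId: string): Promise<void> {
  await createMessage(context, threadId, {
    role: "user", // assumed MessageRole literal
    content: "What is the weather like today?",
  });

  let after: string | undefined;
  do {
    const page = await listMessages(context, threadId, { limit: 20, order: "asc", after });
    for (const message of page.data) {
      console.log(message.role, `${message.content.length} content block(s)`);
    }
    after = page.hasMore ? page.lastId : undefined;
  } while (after !== undefined);
}
```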
*/ +export async function listMessages( + context: Client, + threadId: string, + options: ListMessagesOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _listMessagesSend(context, threadId, options); + return _listMessagesDeserialize(result); +} + +export function _getMessageSend( + context: Client, + threadId: string, + messageId: string, + options: GetMessageOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}/messages/{messageId}", threadId, messageId) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _getMessageDeserialize( + result: GetMessage200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + threadId: result.body["thread_id"], + status: result.body["status"] as MessageStatus, + incompleteDetails: + result.body.incomplete_details === null + ? null + : { + reason: result.body.incomplete_details[ + "reason" + ] as MessageIncompleteDetailsReason, + }, + completedAt: + result.body["completed_at"] === null + ? null + : new Date(result.body["completed_at"]), + incompleteAt: + result.body["incomplete_at"] === null + ? null + : new Date(result.body["incomplete_at"]), + role: result.body["role"] as MessageRole, + content: result.body["content"].map((p) => + deserializeMessageContentUnion(p), + ), + assistantId: result.body["assistant_id"], + runId: result.body["run_id"], + attachments: + result.body["attachments"] === null + ? result.body["attachments"] + : result.body["attachments"].map((p) => ({ + fileId: p["file_id"], + tools: p["tools"], + })), + metadata: result.body["metadata"], + }; +} + +/** Gets an existing message from an existing thread. */ +export async function getMessage( + context: Client, + threadId: string, + messageId: string, + options: GetMessageOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getMessageSend(context, threadId, messageId, options); + return _getMessageDeserialize(result); +} + +export function _updateMessageSend( + context: Client, + threadId: string, + messageId: string, + options: UpdateMessageOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}/messages/{messageId}", threadId, messageId) + .post({ + ...operationOptionsToRequestParameters(options), + body: { metadata: options?.metadata }, + }); +} + +export async function _updateMessageDeserialize( + result: UpdateMessage200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + threadId: result.body["thread_id"], + status: result.body["status"] as MessageStatus, + incompleteDetails: + result.body.incomplete_details === null + ? null + : { + reason: result.body.incomplete_details[ + "reason" + ] as MessageIncompleteDetailsReason, + }, + completedAt: + result.body["completed_at"] === null + ? null + : new Date(result.body["completed_at"]), + incompleteAt: + result.body["incomplete_at"] === null + ? 
null + : new Date(result.body["incomplete_at"]), + role: result.body["role"] as MessageRole, + content: result.body["content"].map((p) => + deserializeMessageContentUnion(p), + ), + assistantId: result.body["assistant_id"], + runId: result.body["run_id"], + attachments: + result.body["attachments"] === null + ? result.body["attachments"] + : result.body["attachments"].map((p) => ({ + fileId: p["file_id"], + tools: p["tools"], + })), + metadata: result.body["metadata"], }; } -/** Return the embeddings for a given prompt. */ -export async function getEmbeddings( +/** Modifies an existing message on an existing thread. */ +export async function updateMessage( context: Client, - deploymentId: string, - input: string[], - options: GetEmbeddingsOptions = { requestOptions: {} }, -): Promise { - const { abortSignal, onResponse, requestOptions, tracingOptions, ...rest } = options; - const result = await _getEmbeddingsSend( + threadId: string, + messageId: string, + options: UpdateMessageOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _updateMessageSend( context, - deploymentId, - { input, ...rest }, - { abortSignal, onResponse, requestOptions, tracingOptions }, + threadId, + messageId, + options, ); - return _getEmbeddingsDeserialize(result); + return _updateMessageDeserialize(result); +} + +export function _createRunSend( + context: Client, + threadId: string, + createRunOptions: CreateRunOptions, + options: CreateRunOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}/runs", threadId) + .post({ + ...operationOptionsToRequestParameters(options), + body: { + assistant_id: createRunOptions["assistantId"], + model: createRunOptions["model"], + instructions: createRunOptions["instructions"], + additional_instructions: createRunOptions["additionalInstructions"], + additional_messages: + createRunOptions["additionalMessages"] === undefined || + createRunOptions["additionalMessages"] === null + ? createRunOptions["additionalMessages"] + : createRunOptions["additionalMessages"].map( + threadMessageSerializer, + ), + tools: createRunOptions["tools"], + stream: createRunOptions["stream"], + temperature: createRunOptions["temperature"], + top_p: createRunOptions["topP"], + max_prompt_tokens: createRunOptions["maxPromptTokens"], + max_completion_tokens: createRunOptions["maxCompletionTokens"], + truncation_strategy: !createRunOptions.truncationStrategy + ? createRunOptions.truncationStrategy + : truncationObjectSerializer(createRunOptions.truncationStrategy), + tool_choice: createRunOptions["toolChoice"], + response_format: createRunOptions["responseFormat"], + metadata: !createRunOptions.metadata + ? createRunOptions.metadata + : (serializeRecord(createRunOptions.metadata as any) as any), + }, + }); } -type ContentFilterResultsForPromptX = { - prompt_filter_results?: Array; - prompt_annotations?: Array; -}; +export async function _createRunDeserialize( + result: CreateRun200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + threadId: result.body["thread_id"], + assistantId: result.body["assistant_id"], + status: result.body["status"] as RunStatus, + requiredAction: !result.body.requiredAction + ? result.body.requiredAction + : deserializeRequiredActionUnion(result.body.required_action), + lastError: + result.body.last_error === null + ? 
null + : { + code: result.body.last_error["code"], + message: result.body.last_error["message"], + }, + model: result.body["model"], + instructions: result.body["instructions"], + tools: result.body["tools"], + createdAt: new Date(result.body["created_at"]), + expiresAt: + result.body["expires_at"] === null + ? null + : new Date(result.body["expires_at"]), + startedAt: + result.body["started_at"] === null + ? null + : new Date(result.body["started_at"]), + completedAt: + result.body["completed_at"] === null + ? null + : new Date(result.body["completed_at"]), + cancelledAt: + result.body["cancelled_at"] === null + ? null + : new Date(result.body["cancelled_at"]), + failedAt: + result.body["failed_at"] === null + ? null + : new Date(result.body["failed_at"]), + incompleteDetails: result.body[ + "incomplete_details" + ] as IncompleteRunDetails, + usage: + result.body.usage === null + ? null + : { + completionTokens: result.body.usage["completion_tokens"], + promptTokens: result.body.usage["prompt_tokens"], + totalTokens: result.body.usage["total_tokens"], + }, + temperature: result.body["temperature"], + topP: result.body["top_p"], + maxPromptTokens: result.body["max_prompt_tokens"], + maxCompletionTokens: result.body["max_completion_tokens"], + truncationStrategy: + result.body.truncation_strategy === null + ? null + : { + type: result.body.truncation_strategy["type"] as TruncationStrategy, + lastMessages: result.body.truncation_strategy["last_messages"], + }, + toolChoice: result.body["tool_choice"], + responseFormat: result.body["response_format"], + metadata: result.body["metadata"], + }; +} -function getContentFilterResultsForPrompt({ - prompt_annotations, - prompt_filter_results, -}: ContentFilterResultsForPromptX): ContentFilterResultsForPrompt[] | undefined { - const res = prompt_filter_results ?? prompt_annotations; - return res?.map(({ content_filter_results, ...rest }) => ({ - ...camelCaseKeys(rest), - contentFilterResults: parseContentFilterResultDetailsForPromptOutput(content_filter_results), - })); +/** Creates a new run for an assistant thread. */ +export async function createRun( + context: Client, + threadId: string, + createRunOptions: CreateRunOptions, + options: CreateRunOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createRunSend( + context, + threadId, + createRunOptions, + options, + ); + return _createRunDeserialize(result); } -function parseContentFilterResultDetailsForPromptOutput({ - error, - ...rest -}: ContentFilterResultDetailsForPromptOutput = {}): ContentFilterResultDetailsForPrompt { - return error ? parseError(error) : camelCaseKeys(rest); +export function _listRunsSend( + context: Client, + threadId: string, + options: ListRunsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}/runs", threadId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + limit: options?.limit, + order: options?.order, + after: options?.after, + before: options?.before, + }, + }); } -function parseError(error: ErrorModel): { error: ErrorModel } { +export async function _listRunsDeserialize( + result: ListRuns200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + return { - error: { - ...error, - details: error["details"] ?? 
[], - }, + object: result.body["object"], + data: result.body["data"].map((p) => ({ + id: p["id"], + object: p["object"], + threadId: p["thread_id"], + assistantId: p["assistant_id"], + status: p["status"] as RunStatus, + requiredAction: !p.required_action + ? p.required_action + : deserializeRequiredActionUnion(p.required_action), + lastError: + p.last_error === null + ? null + : { code: p.last_error["code"], message: p.last_error["message"] }, + model: p["model"], + instructions: p["instructions"], + tools: p["tools"], + createdAt: new Date(p["created_at"]), + expiresAt: p["expires_at"] === null ? null : new Date(p["expires_at"]), + startedAt: p["started_at"] === null ? null : new Date(p["started_at"]), + completedAt: + p["completed_at"] === null ? null : new Date(p["completed_at"]), + cancelledAt: + p["cancelled_at"] === null ? null : new Date(p["cancelled_at"]), + failedAt: p["failed_at"] === null ? null : new Date(p["failed_at"]), + incompleteDetails: p["incomplete_details"] as IncompleteRunDetails, + usage: + p.usage === null + ? null + : { + completionTokens: p.usage["completion_tokens"], + promptTokens: p.usage["prompt_tokens"], + totalTokens: p.usage["total_tokens"], + }, + temperature: p["temperature"], + topP: p["top_p"], + maxPromptTokens: p["max_prompt_tokens"], + maxCompletionTokens: p["max_completion_tokens"], + truncationStrategy: + p.truncation_strategy === null + ? null + : { + type: p.truncation_strategy["type"] as TruncationStrategy, + lastMessages: p.truncation_strategy["last_messages"], + }, + toolChoice: p["tool_choice"], + responseFormat: p["response_format"], + metadata: p["metadata"], + })), + firstId: result.body["first_id"], + lastId: result.body["last_id"], + hasMore: result.body["has_more"], }; } -function parseContentFilterResultsForChoiceOutput({ - error, - ...successResult -}: ContentFilterResultsForChoiceOutput = {}): ContentFilterResultsForChoice { - return error - ? { - error: { - ...error, - details: error["details"] ?? [], - }, - } - : camelCaseKeys(successResult); +/** Gets a list of runs for a specified thread. */ +export async function listRuns( + context: Client, + threadId: string, + options: ListRunsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _listRunsSend(context, threadId, options); + return _listRunsDeserialize(result); +} + +export function _getRunSend( + context: Client, + threadId: string, + runId: string, + options: GetRunOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}/runs/{runId}", threadId, runId) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _getRunDeserialize( + result: GetRun200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + threadId: result.body["thread_id"], + assistantId: result.body["assistant_id"], + status: result.body["status"] as RunStatus, + requiredAction: !result.body.required_action + ? result.body.required_action + : deserializeRequiredActionUnion(result.body.required_action), + lastError: + result.body.last_error === null + ? null + : { + code: result.body.last_error["code"], + message: result.body.last_error["message"], + }, + model: result.body["model"], + instructions: result.body["instructions"], + tools: result.body["tools"], + createdAt: new Date(result.body["created_at"]), + expiresAt: + result.body["expires_at"] === null + ? 
null + : new Date(result.body["expires_at"]), + startedAt: + result.body["started_at"] === null + ? null + : new Date(result.body["started_at"]), + completedAt: + result.body["completed_at"] === null + ? null + : new Date(result.body["completed_at"]), + cancelledAt: + result.body["cancelled_at"] === null + ? null + : new Date(result.body["cancelled_at"]), + failedAt: + result.body["failed_at"] === null + ? null + : new Date(result.body["failed_at"]), + incompleteDetails: result.body[ + "incomplete_details" + ] as IncompleteRunDetails, + usage: + result.body.usage === null + ? null + : { + completionTokens: result.body.usage["completion_tokens"], + promptTokens: result.body.usage["prompt_tokens"], + totalTokens: result.body.usage["total_tokens"], + }, + temperature: result.body["temperature"], + topP: result.body["top_p"], + maxPromptTokens: result.body["max_prompt_tokens"], + maxCompletionTokens: result.body["max_completion_tokens"], + truncationStrategy: + result.body.truncation_strategy === null + ? null + : { + type: result.body.truncation_strategy["type"] as TruncationStrategy, + lastMessages: result.body.truncation_strategy["last_messages"], + }, + toolChoice: result.body["tool_choice"], + responseFormat: result.body["response_format"], + metadata: result.body["metadata"], + }; +} + +/** Gets an existing run from an existing thread. */ +export async function getRun( + context: Client, + threadId: string, + runId: string, + options: GetRunOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getRunSend(context, threadId, runId, options); + return _getRunDeserialize(result); +} + +export function _updateRunSend( + context: Client, + threadId: string, + runId: string, + options: UpdateRunOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}/runs/{runId}", threadId, runId) + .post({ + ...operationOptionsToRequestParameters(options), + body: { metadata: options?.metadata }, + }); +} + +export async function _updateRunDeserialize( + result: UpdateRun200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + threadId: result.body["thread_id"], + assistantId: result.body["assistant_id"], + status: result.body["status"] as RunStatus, + requiredAction: !result.body.requiredAction + ? result.body.requiredAction + : deserializeRequiredActionUnion(result.body.required_action), + lastError: + result.body.last_error === null + ? null + : { + code: result.body.last_error["code"], + message: result.body.last_error["message"], + }, + model: result.body["model"], + instructions: result.body["instructions"], + tools: result.body["tools"], + createdAt: new Date(result.body["created_at"]), + expiresAt: + result.body["expires_at"] === null + ? null + : new Date(result.body["expires_at"]), + startedAt: + result.body["started_at"] === null + ? null + : new Date(result.body["started_at"]), + completedAt: + result.body["completed_at"] === null + ? null + : new Date(result.body["completed_at"]), + cancelledAt: + result.body["cancelled_at"] === null + ? null + : new Date(result.body["cancelled_at"]), + failedAt: + result.body["failed_at"] === null + ? null + : new Date(result.body["failed_at"]), + incompleteDetails: result.body[ + "incomplete_details" + ] as IncompleteRunDetails, + usage: + result.body.usage === null + ? 
null + : { + completionTokens: result.body.usage["completion_tokens"], + promptTokens: result.body.usage["prompt_tokens"], + totalTokens: result.body.usage["total_tokens"], + }, + temperature: result.body["temperature"], + topP: result.body["top_p"], + maxPromptTokens: result.body["max_prompt_tokens"], + maxCompletionTokens: result.body["max_completion_tokens"], + truncationStrategy: + result.body.truncation_strategy === null + ? null + : { + type: result.body.truncation_strategy["type"] as TruncationStrategy, + lastMessages: result.body.truncation_strategy["last_messages"], + }, + toolChoice: result.body["tool_choice"], + responseFormat: result.body["response_format"], + metadata: result.body["metadata"], + }; +} + +/** Modifies an existing thread run. */ +export async function updateRun( + context: Client, + threadId: string, + runId: string, + options: UpdateRunOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _updateRunSend(context, threadId, runId, options); + return _updateRunDeserialize(result); +} + +export function _submitToolOutputsToRunSend( + context: Client, + threadId: string, + runId: string, + toolOutputs: ToolOutput[], + options: SubmitToolOutputsToRunOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path( + "/threads/{threadId}/runs/{runId}/submit_tool_outputs", + threadId, + runId, + ) + .post({ + ...operationOptionsToRequestParameters(options), + body: { + tool_outputs: toolOutputs.map(toolOutputSerializer), + stream: options?.stream, + }, + }); +} + +export async function _submitToolOutputsToRunDeserialize( + result: SubmitToolOutputsToRun200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + threadId: result.body["thread_id"], + assistantId: result.body["assistant_id"], + status: result.body["status"] as RunStatus, + requiredAction: !result.body.required_action + ? result.body.required_action + : deserializeRequiredActionUnion(result.body.required_action), + lastError: + result.body.last_error === null + ? null + : { + code: result.body.last_error["code"], + message: result.body.last_error["message"], + }, + model: result.body["model"], + instructions: result.body["instructions"], + tools: result.body["tools"], + createdAt: new Date(result.body["created_at"]), + expiresAt: + result.body["expires_at"] === null + ? null + : new Date(result.body["expires_at"]), + startedAt: + result.body["started_at"] === null + ? null + : new Date(result.body["started_at"]), + completedAt: + result.body["completed_at"] === null + ? null + : new Date(result.body["completed_at"]), + cancelledAt: + result.body["cancelled_at"] === null + ? null + : new Date(result.body["cancelled_at"]), + failedAt: + result.body["failed_at"] === null + ? null + : new Date(result.body["failed_at"]), + incompleteDetails: result.body[ + "incomplete_details" + ] as IncompleteRunDetails, + usage: + result.body.usage === null + ? null + : { + completionTokens: result.body.usage["completion_tokens"], + promptTokens: result.body.usage["prompt_tokens"], + totalTokens: result.body.usage["total_tokens"], + }, + temperature: result.body["temperature"], + topP: result.body["top_p"], + maxPromptTokens: result.body["max_prompt_tokens"], + maxCompletionTokens: result.body["max_completion_tokens"], + truncationStrategy: + result.body.truncation_strategy === null + ? 
null + : { + type: result.body.truncation_strategy["type"] as TruncationStrategy, + lastMessages: result.body.truncation_strategy["last_messages"], + }, + toolChoice: result.body["tool_choice"], + responseFormat: result.body["response_format"], + metadata: result.body["metadata"], + }; +} + +/** Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. */ +export async function submitToolOutputsToRun( + context: Client, + threadId: string, + runId: string, + toolOutputs: ToolOutput[], + options: SubmitToolOutputsToRunOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _submitToolOutputsToRunSend( + context, + threadId, + runId, + toolOutputs, + options, + ); + return _submitToolOutputsToRunDeserialize(result); +} + +export function _cancelRunSend( + context: Client, + threadId: string, + runId: string, + options: CancelRunOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}/runs/{runId}/cancel", threadId, runId) + .post({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _cancelRunDeserialize( + result: CancelRun200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + threadId: result.body["thread_id"], + assistantId: result.body["assistant_id"], + status: result.body["status"] as RunStatus, + requiredAction: !result.body.required_action + ? result.body.required_action + : deserializeRequiredActionUnion(result.body.required_action), + lastError: + result.body.last_error === null + ? null + : { + code: result.body.last_error["code"], + message: result.body.last_error["message"], + }, + model: result.body["model"], + instructions: result.body["instructions"], + tools: result.body["tools"], + createdAt: new Date(result.body["created_at"]), + expiresAt: + result.body["expires_at"] === null + ? null + : new Date(result.body["expires_at"]), + startedAt: + result.body["started_at"] === null + ? null + : new Date(result.body["started_at"]), + completedAt: + result.body["completed_at"] === null + ? null + : new Date(result.body["completed_at"]), + cancelledAt: + result.body["cancelled_at"] === null + ? null + : new Date(result.body["cancelled_at"]), + failedAt: + result.body["failed_at"] === null + ? null + : new Date(result.body["failed_at"]), + incompleteDetails: result.body[ + "incomplete_details" + ] as IncompleteRunDetails, + usage: + result.body.usage === null + ? null + : { + completionTokens: result.body.usage["completion_tokens"], + promptTokens: result.body.usage["prompt_tokens"], + totalTokens: result.body.usage["total_tokens"], + }, + temperature: result.body["temperature"], + topP: result.body["top_p"], + maxPromptTokens: result.body["max_prompt_tokens"], + maxCompletionTokens: result.body["max_completion_tokens"], + truncationStrategy: + result.body.truncation_strategy === null + ? null + : { + type: result.body.truncation_strategy["type"] as TruncationStrategy, + lastMessages: result.body.truncation_strategy["last_messages"], + }, + toolChoice: result.body["tool_choice"], + responseFormat: result.body["response_format"], + metadata: result.body["metadata"], + }; +} + +/** Cancels a run of an in-progress thread. 
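The doc comment on `submitToolOutputsToRun` spells out the contract: a run pauses in `requires_action` with a `required_action.type` of `submit_tool_outputs` until outputs arrive. A polling sketch, assuming the other non-terminal status literals (`queued`, `in_progress`) and the `ToolOutput` field names (`toolCallId`, `output`); import path illustrative:

```ts
import type { Client } from "@azure-rest/core-client";
import { createRun, getRun, submitToolOutputsToRun } from "./operations.js"; // path illustrative

const sleep = (ms: number): Promise<void> => new Promise((resolve) => setTimeout(resolve, ms));

// Drive a run to a terminal state, answering requires_action pauses.
async function runToCompletion(context: Client, threadId: string, assistantId: string) {
  let run = await createRun(context, threadId, { assistantId });
  while (run.status === "queued" || run.status === "in_progress" || run.status === "requires_action") {
    if (run.status === "requires_action") {
      // Real code would read the tool calls out of run.requiredAction;
      // the ToolOutput field names below are assumed from the model.
      run = await submitToolOutputsToRun(context, threadId, run.id, [
        { toolCallId: "call_abc123", output: '{"ok":true}' }, // placeholder values
      ]);
    } else {
      await sleep(1000);
      run = await getRun(context, threadId, run.id);
    }
  }
  return run; // e.g. completed, failed, cancelled, or expired
}
```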
 */ +export async function cancelRun( + context: Client, + threadId: string, + runId: string, + options: CancelRunOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _cancelRunSend(context, threadId, runId, options); + return _cancelRunDeserialize(result); +} + +export function _createThreadAndRunSend( + context: Client, + body: CreateAndRunThreadOptions, + options: CreateThreadAndRunOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/runs") + .post({ + ...operationOptionsToRequestParameters(options), + body: { + assistant_id: body["assistantId"], + thread: !body.thread + ? body.thread + : assistantThreadCreationOptionsSerializer(body.thread), + model: body["model"], + instructions: body["instructions"], + tools: body["tools"], + tool_resources: !body.toolResources + ? body.toolResources + : updateToolResourcesOptionsSerializer(body.toolResources), + stream: body["stream"], + temperature: body["temperature"], + top_p: body["topP"], + max_prompt_tokens: body["maxPromptTokens"], + max_completion_tokens: body["maxCompletionTokens"], + truncation_strategy: !body.truncationStrategy + ? body.truncationStrategy + : truncationObjectSerializer(body.truncationStrategy), + tool_choice: body["toolChoice"], + response_format: body["responseFormat"], + metadata: !body.metadata + ? body.metadata + : (serializeRecord(body.metadata as any) as any), + }, + }); +} + +export async function _createThreadAndRunDeserialize( + result: CreateThreadAndRun200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + threadId: result.body["thread_id"], + assistantId: result.body["assistant_id"], + status: result.body["status"] as RunStatus, + requiredAction: !result.body.required_action + ? result.body.required_action + : deserializeRequiredActionUnion(result.body.required_action), + lastError: + result.body.last_error === null + ? null + : { + code: result.body.last_error["code"], + message: result.body.last_error["message"], + }, + model: result.body["model"], + instructions: result.body["instructions"], + tools: result.body["tools"], + createdAt: new Date(result.body["created_at"]), + expiresAt: + result.body["expires_at"] === null + ? null + : new Date(result.body["expires_at"]), + startedAt: + result.body["started_at"] === null + ? null + : new Date(result.body["started_at"]), + completedAt: + result.body["completed_at"] === null + ? null + : new Date(result.body["completed_at"]), + cancelledAt: + result.body["cancelled_at"] === null + ? null + : new Date(result.body["cancelled_at"]), + failedAt: + result.body["failed_at"] === null + ? null + : new Date(result.body["failed_at"]), + incompleteDetails: result.body[ + "incomplete_details" + ] as IncompleteRunDetails, + usage: + result.body.usage === null + ? null + : { + completionTokens: result.body.usage["completion_tokens"], + promptTokens: result.body.usage["prompt_tokens"], + totalTokens: result.body.usage["total_tokens"], + }, + temperature: result.body["temperature"], + topP: result.body["top_p"], + maxPromptTokens: result.body["max_prompt_tokens"], + maxCompletionTokens: result.body["max_completion_tokens"], + truncationStrategy: + result.body.truncation_strategy === null + ? 
null + : { + type: result.body.truncation_strategy["type"] as TruncationStrategy, + lastMessages: result.body.truncation_strategy["last_messages"], + }, + toolChoice: result.body["tool_choice"], + responseFormat: result.body["response_format"], + metadata: result.body["metadata"], + }; +} + +/** Creates a new assistant thread and immediately starts a run using that new thread. */ +export async function createThreadAndRun( + context: Client, + body: CreateAndRunThreadOptions, + options: CreateThreadAndRunOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createThreadAndRunSend(context, body, options); + return _createThreadAndRunDeserialize(result); +} + +export function _getRunStepSend( + context: Client, + threadId: string, + runId: string, + stepId: string, + options: GetRunStepOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path( + "/threads/{threadId}/runs/{runId}/steps/{stepId}", + threadId, + runId, + stepId, + ) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _getRunStepDeserialize( + result: GetRunStep200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + type: result.body["type"] as RunStepType, + assistantId: result.body["assistant_id"], + threadId: result.body["thread_id"], + runId: result.body["run_id"], + status: result.body["status"] as RunStepStatus, + stepDetails: deserializeRunStepDetailsUnion(result.body.step_details), + lastError: + result.body.last_error === null + ? null + : { + code: result.body.last_error["code"] as RunStepErrorCode, + message: result.body.last_error["message"], + }, + createdAt: new Date(result.body["created_at"]), + expiredAt: + result.body["expired_at"] === null + ? null + : new Date(result.body["expired_at"]), + completedAt: + result.body["completed_at"] === null + ? null + : new Date(result.body["completed_at"]), + cancelledAt: + result.body["cancelled_at"] === null + ? null + : new Date(result.body["cancelled_at"]), + failedAt: + result.body["failed_at"] === null + ? null + : new Date(result.body["failed_at"]), + usage: + result.body.usage === null + ? null + : !result.body.usage + ? undefined + : { + completionTokens: result.body.usage?.["completion_tokens"], + promptTokens: result.body.usage?.["prompt_tokens"], + totalTokens: result.body.usage?.["total_tokens"], + }, + metadata: result.body["metadata"], + }; +} + +/** Gets a single run step from a thread run. 
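`createThreadAndRun` collapses `createThread` plus `createRun` into one request. A sketch, assuming `AssistantThreadCreationOptions.messages` accepts the same message shape as `createMessage`; import path illustrative:

```ts
import type { Client } from "@azure-rest/core-client";
import { createThreadAndRun } from "./operations.js"; // path illustrative

// One request creates the thread, seeds it, and starts the run.
async function askOnce(context: Client, assistantId: string) {
  const run = await createThreadAndRun(context, {
    assistantId,
    thread: { messages: [{ role: "user", content: "Summarize this quarter's numbers." }] },
  });
  // The new thread's ID comes back on the run for follow-up calls.
  return { threadId: run.threadId, runId: run.id, status: run.status };
}
```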
*/ +export async function getRunStep( + context: Client, + threadId: string, + runId: string, + stepId: string, + options: GetRunStepOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getRunStepSend( + context, + threadId, + runId, + stepId, + options, + ); + return _getRunStepDeserialize(result); +} + +export function _listRunStepsSend( + context: Client, + threadId: string, + runId: string, + options: ListRunStepsOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/threads/{threadId}/runs/{runId}/steps", threadId, runId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + limit: options?.limit, + order: options?.order, + after: options?.after, + before: options?.before, + }, + }); +} + +export async function _listRunStepsDeserialize( + result: ListRunSteps200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + object: result.body["object"], + data: result.body["data"].map((p) => ({ + id: p["id"], + object: p["object"], + type: p["type"] as RunStepType, + assistantId: p["assistant_id"], + threadId: p["thread_id"], + runId: p["run_id"], + status: p["status"] as RunStepStatus, + stepDetails: deserializeRunStepDetailsUnion(p.step_details), + lastError: + p.last_error === null + ? null + : { + code: p.last_error["code"] as RunStepErrorCode, + message: p.last_error["message"], + }, + createdAt: new Date(p["created_at"]), + expiredAt: p["expired_at"] === null ? null : new Date(p["expired_at"]), + completedAt: + p["completed_at"] === null ? null : new Date(p["completed_at"]), + cancelledAt: + p["cancelled_at"] === null ? null : new Date(p["cancelled_at"]), + failedAt: p["failed_at"] === null ? null : new Date(p["failed_at"]), + usage: + p.usage === null + ? null + : !p.usage + ? undefined + : { + completionTokens: p.usage?.["completion_tokens"], + promptTokens: p.usage?.["prompt_tokens"], + totalTokens: p.usage?.["total_tokens"], + }, + metadata: p["metadata"], + })), + firstId: result.body["first_id"], + lastId: result.body["last_id"], + hasMore: result.body["has_more"], + }; +} + +/** Gets a list of run steps from a thread run. */ +export async function listRunSteps( + context: Client, + threadId: string, + runId: string, + options: ListRunStepsOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _listRunStepsSend(context, threadId, runId, options); + return _listRunStepsDeserialize(result); +} + +export function _listFilesSend( + context: Client, + options: ListFilesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/files") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { purpose: options?.purpose }, + }); +} + +export async function _listFilesDeserialize( + result: ListFiles200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + object: result.body["object"], + data: result.body["data"].map((p) => ({ + object: p["object"], + id: p["id"], + bytes: p["bytes"], + filename: p["filename"], + createdAt: new Date(p["created_at"]), + purpose: p["purpose"] as FilePurpose, + status: p["status"] as FileState, + statusDetails: p["status_details"], + })), + }; +} + +/** Gets a list of previously uploaded files. 
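`listRunSteps` is the audit trail for a run. A sketch, assuming `"asc"` is a valid `order` literal; import path illustrative:

```ts
import type { Client } from "@azure-rest/core-client";
import { listRunSteps } from "./operations.js"; // path illustrative

// Inspect what a run actually did, step by step.
async function printSteps(context: Client, threadId: string, runId: string): Promise<void> {
  const steps = await listRunSteps(context, threadId, runId, { order: "asc" });
  for (const step of steps.data) {
    // stepDetails is a deserialized union (message creation vs. tool calls).
    console.log(step.type, step.status, step.usage?.totalTokens ?? "usage n/a");
  }
}
```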
*/ +export async function listFiles( + context: Client, + options: ListFilesOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _listFilesSend(context, options); + return _listFilesDeserialize(result); +} + +export function _uploadFileSend( + context: Client, + file: Uint8Array, + purpose: FilePurpose, + options: UploadFileOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/files") + .post({ + ...operationOptionsToRequestParameters(options), + contentType: (options.contentType as any) ?? "multipart/form-data", + body: { + file: uint8ArrayToString(file, "base64"), + purpose: purpose, + filename: options?.filename, + }, + }); +} + +export async function _uploadFileDeserialize( + result: UploadFile200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + object: result.body["object"], + id: result.body["id"], + bytes: result.body["bytes"], + filename: result.body["filename"], + createdAt: new Date(result.body["created_at"]), + purpose: result.body["purpose"] as FilePurpose, + status: result.body["status"] as FileState, + statusDetails: result.body["status_details"], + }; +} + +/** Uploads a file for use by other operations. */ +export async function uploadFile( + context: Client, + file: Uint8Array, + purpose: FilePurpose, + options: UploadFileOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _uploadFileSend(context, file, purpose, options); + return _uploadFileDeserialize(result); +} + +export function _deleteFileSend( + context: Client, + fileId: string, + options: DeleteFileOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/files/{fileId}", fileId) + .delete({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _deleteFileDeserialize( + result: DeleteFile200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + deleted: result.body["deleted"], + object: result.body["object"], + }; +} + +/** Delete a previously uploaded file. */ +export async function deleteFile( + context: Client, + fileId: string, + options: DeleteFileOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteFileSend(context, fileId, options); + return _deleteFileDeserialize(result); +} + +export function _getFileSend( + context: Client, + fileId: string, + options: GetFileOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/files/{fileId}", fileId) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _getFileDeserialize( + result: GetFile200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + object: result.body["object"], + id: result.body["id"], + bytes: result.body["bytes"], + filename: result.body["filename"], + createdAt: new Date(result.body["created_at"]), + purpose: result.body["purpose"] as FilePurpose, + status: result.body["status"] as FileState, + statusDetails: result.body["status_details"], + }; +} + +/** Returns information about a specific file. Does not retrieve file content. 
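`uploadFile` takes raw bytes plus a `FilePurpose`; the optional `filename` travels in the multipart form. A sketch, assuming `"assistants"` is a valid purpose literal; import path illustrative:

```ts
import { readFile } from "node:fs/promises";
import type { Client } from "@azure-rest/core-client";
import { uploadFile, deleteFile } from "./operations.js"; // path illustrative

async function pushDocument(context: Client, path: string): Promise<void> {
  // Bytes go up as multipart/form-data together with the purpose.
  const bytes = new Uint8Array(await readFile(path));
  const file = await uploadFile(context, bytes, "assistants", { filename: "handbook.md" });
  console.log(file.id, file.bytes, file.status);

  // Remove it once nothing references it anymore.
  await deleteFile(context, file.id);
}
```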
 */ +export async function getFile( + context: Client, + fileId: string, + options: GetFileOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getFileSend(context, fileId, options); + return _getFileDeserialize(result); +} + +export function _getFileContentSend( + context: Client, + fileId: string, + options: GetFileContentOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/files/{fileId}/content", fileId) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _getFileContentDeserialize( + result: GetFileContent200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return typeof result.body === "string" + ? stringToUint8Array(result.body, "base64") + : result.body; +} + +/** Retrieves the content of a specified file. */ +export async function getFileContent( + context: Client, + fileId: string, + options: GetFileContentOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getFileContentSend(context, fileId, options); + return _getFileContentDeserialize(result); +} + +export function _listVectorStoresSend( + context: Client, + options: ListVectorStoresOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/vector_stores") + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + limit: options?.limit, + order: options?.order, + after: options?.after, + before: options?.before, + }, + }); +} + +export async function _listVectorStoresDeserialize( + result: ListVectorStores200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + object: result.body["object"], + data: result.body["data"].map((p) => ({ + id: p["id"], + object: p["object"], + createdAt: new Date(p["created_at"]), + name: p["name"], + usageBytes: p["usage_bytes"], + fileCounts: { + inProgress: p.file_counts["in_progress"], + completed: p.file_counts["completed"], + failed: p.file_counts["failed"], + cancelled: p.file_counts["cancelled"], + total: p.file_counts["total"], + }, + status: p["status"] as VectorStoreStatus, + expiresAfter: !p.expires_after + ? undefined + : { + anchor: p.expires_after?.[ + "anchor" + ] as VectorStoreExpirationPolicyAnchor, + days: p.expires_after?.["days"], + }, + expiresAt: + p["expires_at"] !== undefined ? new Date(p["expires_at"]) : undefined, + lastActiveAt: + p["last_active_at"] === null ? null : new Date(p["last_active_at"]), + metadata: p["metadata"], + })), + firstId: result.body["first_id"], + lastId: result.body["last_id"], + hasMore: result.body["has_more"], + }; +} + +/** Returns a list of vector stores. */ +export async function listVectorStores( + context: Client, + options: ListVectorStoresOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _listVectorStoresSend(context, options); + return _listVectorStoresDeserialize(result); +} + +export function _createVectorStoreSend( + context: Client, + body: VectorStoreOptions, + options: CreateVectorStoreOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/vector_stores") + .post({ + ...operationOptionsToRequestParameters(options), + body: { + file_ids: body["fileIds"], + name: body["name"], + expires_after: !body.expiresAfter + ? body.expiresAfter + : vectorStoreExpirationPolicySerializer(body.expiresAfter), + metadata: !body.metadata + ? 
body.metadata + : (serializeRecord(body.metadata as any) as any), + }, + }); +} + +export async function _createVectorStoreDeserialize( + result: CreateVectorStore200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + name: result.body["name"], + usageBytes: result.body["usage_bytes"], + fileCounts: { + inProgress: result.body.file_counts["in_progress"], + completed: result.body.file_counts["completed"], + failed: result.body.file_counts["failed"], + cancelled: result.body.file_counts["cancelled"], + total: result.body.file_counts["total"], + }, + status: result.body["status"] as VectorStoreStatus, + expiresAfter: !result.body.expires_after + ? undefined + : { + anchor: result.body.expires_after?.[ + "anchor" + ] as VectorStoreExpirationPolicyAnchor, + days: result.body.expires_after?.["days"], + }, + expiresAt: + result.body["expires_at"] !== undefined + ? new Date(result.body["expires_at"]) + : undefined, + lastActiveAt: + result.body["last_active_at"] === null + ? null + : new Date(result.body["last_active_at"]), + metadata: result.body["metadata"], + }; +} + +/** Creates a vector store. */ +export async function createVectorStore( + context: Client, + body: VectorStoreOptions, + options: CreateVectorStoreOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createVectorStoreSend(context, body, options); + return _createVectorStoreDeserialize(result); +} + +export function _getVectorStoreSend( + context: Client, + vectorStoreId: string, + options: GetVectorStoreOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/vector_stores/{vectorStoreId}", vectorStoreId) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _getVectorStoreDeserialize( + result: GetVectorStore200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + name: result.body["name"], + usageBytes: result.body["usage_bytes"], + fileCounts: { + inProgress: result.body.file_counts["in_progress"], + completed: result.body.file_counts["completed"], + failed: result.body.file_counts["failed"], + cancelled: result.body.file_counts["cancelled"], + total: result.body.file_counts["total"], + }, + status: result.body["status"] as VectorStoreStatus, + expiresAfter: !result.body.expires_after + ? undefined + : { + anchor: result.body.expires_after?.[ + "anchor" + ] as VectorStoreExpirationPolicyAnchor, + days: result.body.expires_after?.["days"], + }, + expiresAt: + result.body["expires_at"] !== undefined + ? new Date(result.body["expires_at"]) + : undefined, + lastActiveAt: + result.body["last_active_at"] === null + ? null + : new Date(result.body["last_active_at"]), + metadata: result.body["metadata"], + }; +} + +/** Returns the vector store object matching the specified ID. 
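`createVectorStore` accepts pre-uploaded file IDs and an expiration policy. A sketch, assuming `"last_active_at"` is a valid `VectorStoreExpirationPolicyAnchor` literal; import path illustrative:

```ts
import type { Client } from "@azure-rest/core-client";
import { createVectorStore, getVectorStore } from "./operations.js"; // path illustrative

async function newStore(context: Client, fileIds: string[]): Promise<void> {
  const store = await createVectorStore(context, {
    name: "docs-store",
    fileIds, // IDs of files uploaded earlier via uploadFile
    expiresAfter: { anchor: "last_active_at", days: 7 }, // anchor literal assumed
  });

  // fileCounts reports ingestion progress; re-read the store to track it.
  const refreshed = await getVectorStore(context, store.id);
  console.log(refreshed.status, `${refreshed.fileCounts.inProgress} file(s) still processing`);
}
```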
 */ +export async function getVectorStore( + context: Client, + vectorStoreId: string, + options: GetVectorStoreOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getVectorStoreSend(context, vectorStoreId, options); + return _getVectorStoreDeserialize(result); +} + +export function _modifyVectorStoreSend( + context: Client, + vectorStoreId: string, + body: VectorStoreUpdateOptions, + options: ModifyVectorStoreOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/vector_stores/{vectorStoreId}", vectorStoreId) + .post({ + ...operationOptionsToRequestParameters(options), + body: { + name: body["name"], + expires_after: !body.expiresAfter + ? body.expiresAfter + : vectorStoreExpirationPolicySerializer(body.expiresAfter), + metadata: !body.metadata + ? body.metadata + : (serializeRecord(body.metadata as any) as any), + }, + }); +} + +export async function _modifyVectorStoreDeserialize( + result: ModifyVectorStore200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + name: result.body["name"], + usageBytes: result.body["usage_bytes"], + fileCounts: { + inProgress: result.body.file_counts["in_progress"], + completed: result.body.file_counts["completed"], + failed: result.body.file_counts["failed"], + cancelled: result.body.file_counts["cancelled"], + total: result.body.file_counts["total"], + }, + status: result.body["status"] as VectorStoreStatus, + expiresAfter: !result.body.expires_after + ? undefined + : { + anchor: result.body.expires_after?.[ + "anchor" + ] as VectorStoreExpirationPolicyAnchor, + days: result.body.expires_after?.["days"], + }, + expiresAt: + result.body["expires_at"] !== undefined + ? new Date(result.body["expires_at"]) + : undefined, + lastActiveAt: + result.body["last_active_at"] === null + ? null + : new Date(result.body["last_active_at"]), + metadata: result.body["metadata"], + }; +} + +/** Modifies an existing vector store. */ +export async function modifyVectorStore( + context: Client, + vectorStoreId: string, + body: VectorStoreUpdateOptions, + options: ModifyVectorStoreOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _modifyVectorStoreSend( + context, + vectorStoreId, + body, + options, + ); + return _modifyVectorStoreDeserialize(result); +} + +export function _deleteVectorStoreSend( + context: Client, + vectorStoreId: string, + options: DeleteVectorStoreOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/vector_stores/{vectorStoreId}", vectorStoreId) + .delete({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _deleteVectorStoreDeserialize( + result: DeleteVectorStore200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + deleted: result.body["deleted"], + object: result.body["object"], + }; +} + +/** Deletes the vector store object matching the specified ID. 
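`modifyVectorStore` mutates only `name`, `expiresAfter`, and `metadata`. A sketch under the same anchor-literal assumption; import path illustrative:

```ts
import type { Client } from "@azure-rest/core-client";
import { modifyVectorStore } from "./operations.js"; // path illustrative

async function extendStore(context: Client, vectorStoreId: string): Promise<void> {
  const store = await modifyVectorStore(context, vectorStoreId, {
    name: "docs-store-v2",
    expiresAfter: { anchor: "last_active_at", days: 30 }, // anchor literal assumed
  });
  console.log(store.name, store.expiresAt?.toISOString());
}
```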
*/ +export async function deleteVectorStore( + context: Client, + vectorStoreId: string, + options: DeleteVectorStoreOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteVectorStoreSend(context, vectorStoreId, options); + return _deleteVectorStoreDeserialize(result); +} + +export function _listVectorStoreFilesSend( + context: Client, + vectorStoreId: string, + options: ListVectorStoreFilesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/vector_stores/{vectorStoreId}/files", vectorStoreId) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + filter: options?.filter, + limit: options?.limit, + order: options?.order, + after: options?.after, + before: options?.before, + }, + }); +} + +export async function _listVectorStoreFilesDeserialize( + result: ListVectorStoreFiles200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + object: result.body["object"], + data: result.body["data"].map((p) => ({ + id: p["id"], + object: p["object"], + usageBytes: p["usage_bytes"], + createdAt: new Date(p["created_at"]), + vectorStoreId: p["vector_store_id"], + status: p["status"] as VectorStoreFileStatus, + lastError: + p.last_error === null + ? null + : { + code: p.last_error["code"] as VectorStoreFileErrorCode, + message: p.last_error["message"], + }, + })), + firstId: result.body["first_id"], + lastId: result.body["last_id"], + hasMore: result.body["has_more"], + }; +} + +/** Returns a list of vector store files. */ +export async function listVectorStoreFiles( + context: Client, + vectorStoreId: string, + options: ListVectorStoreFilesOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _listVectorStoreFilesSend( + context, + vectorStoreId, + options, + ); + return _listVectorStoreFilesDeserialize(result); +} + +export function _createVectorStoreFileSend( + context: Client, + vectorStoreId: string, + fileId: string, + options: CreateVectorStoreFileOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/vector_stores/{vectorStoreId}/files", vectorStoreId) + .post({ + ...operationOptionsToRequestParameters(options), + body: { file_id: fileId }, + }); +} + +export async function _createVectorStoreFileDeserialize( + result: CreateVectorStoreFile200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + usageBytes: result.body["usage_bytes"], + createdAt: new Date(result.body["created_at"]), + vectorStoreId: result.body["vector_store_id"], + status: result.body["status"] as VectorStoreFileStatus, + lastError: + result.body.last_error === null + ? null + : { + code: result.body.last_error["code"] as VectorStoreFileErrorCode, + message: result.body.last_error["message"], + }, + }; +} + +/** Create a vector store file by attaching a file to a vector store. 
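`createVectorStoreFile` attaches one uploaded file; `listVectorStoreFiles` then exposes per-file ingestion state, including `lastError`. A sketch, assuming `"failed"` is a valid `filter` literal; import path illustrative:

```ts
import type { Client } from "@azure-rest/core-client";
import { createVectorStoreFile, listVectorStoreFiles } from "./operations.js"; // path illustrative

async function attachAndAudit(context: Client, vectorStoreId: string, fileId: string): Promise<void> {
  const vsFile = await createVectorStoreFile(context, vectorStoreId, fileId);
  console.log(vsFile.id, vsFile.status);

  // Surface per-file ingestion failures via lastError.
  const failed = await listVectorStoreFiles(context, vectorStoreId, { filter: "failed" }); // filter literal assumed
  for (const f of failed.data) {
    console.log(f.id, f.lastError?.message ?? "no error recorded");
  }
}
```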
*/ +export async function createVectorStoreFile( + context: Client, + vectorStoreId: string, + fileId: string, + options: CreateVectorStoreFileOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createVectorStoreFileSend( + context, + vectorStoreId, + fileId, + options, + ); + return _createVectorStoreFileDeserialize(result); +} + +export function _getVectorStoreFileSend( + context: Client, + vectorStoreId: string, + fileId: string, + options: GetVectorStoreFileOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path( + "/vector_stores/{vectorStoreId}/files/{fileId}", + vectorStoreId, + fileId, + ) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _getVectorStoreFileDeserialize( + result: GetVectorStoreFile200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + usageBytes: result.body["usage_bytes"], + createdAt: new Date(result.body["created_at"]), + vectorStoreId: result.body["vector_store_id"], + status: result.body["status"] as VectorStoreFileStatus, + lastError: + result.body.last_error === null + ? null + : { + code: result.body.last_error["code"] as VectorStoreFileErrorCode, + message: result.body.last_error["message"], + }, + }; +} + +/** Retrieves a vector store file. */ +export async function getVectorStoreFile( + context: Client, + vectorStoreId: string, + fileId: string, + options: GetVectorStoreFileOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getVectorStoreFileSend( + context, + vectorStoreId, + fileId, + options, + ); + return _getVectorStoreFileDeserialize(result); +} + +export function _deleteVectorStoreFileSend( + context: Client, + vectorStoreId: string, + fileId: string, + options: DeleteVectorStoreFileOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path( + "/vector_stores/{vectorStoreId}/files/{fileId}", + vectorStoreId, + fileId, + ) + .delete({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _deleteVectorStoreFileDeserialize( + result: DeleteVectorStoreFile200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + deleted: result.body["deleted"], + object: result.body["object"], + }; +} + +/** + * Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. + * To delete the file, use the delete file endpoint. 
+ */ +export async function deleteVectorStoreFile( + context: Client, + vectorStoreId: string, + fileId: string, + options: DeleteVectorStoreFileOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _deleteVectorStoreFileSend( + context, + vectorStoreId, + fileId, + options, + ); + return _deleteVectorStoreFileDeserialize(result); +} + +export function _createVectorStoreFileBatchSend( + context: Client, + vectorStoreId: string, + fileIds: string[], + options: CreateVectorStoreFileBatchOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path("/vector_stores/{vectorStoreId}/file_batches", vectorStoreId) + .post({ + ...operationOptionsToRequestParameters(options), + body: { file_ids: fileIds }, + }); +} + +export async function _createVectorStoreFileBatchDeserialize( + result: CreateVectorStoreFileBatch200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + vectorStoreId: result.body["vector_store_id"], + status: result.body["status"] as VectorStoreFileBatchStatus, + fileCounts: { + inProgress: result.body.file_counts["in_progress"], + completed: result.body.file_counts["completed"], + failed: result.body.file_counts["failed"], + cancelled: result.body.file_counts["cancelled"], + total: result.body.file_counts["total"], + }, + }; +} + +/** Create a vector store file batch. */ +export async function createVectorStoreFileBatch( + context: Client, + vectorStoreId: string, + fileIds: string[], + options: CreateVectorStoreFileBatchOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _createVectorStoreFileBatchSend( + context, + vectorStoreId, + fileIds, + options, + ); + return _createVectorStoreFileBatchDeserialize(result); +} + +export function _getVectorStoreFileBatchSend( + context: Client, + vectorStoreId: string, + batchId: string, + options: GetVectorStoreFileBatchOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path( + "/vector_stores/{vectorStoreId}/file_batches/{batchId}", + vectorStoreId, + batchId, + ) + .get({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _getVectorStoreFileBatchDeserialize( + result: GetVectorStoreFileBatch200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + vectorStoreId: result.body["vector_store_id"], + status: result.body["status"] as VectorStoreFileBatchStatus, + fileCounts: { + inProgress: result.body.file_counts["in_progress"], + completed: result.body.file_counts["completed"], + failed: result.body.file_counts["failed"], + cancelled: result.body.file_counts["cancelled"], + total: result.body.file_counts["total"], + }, + }; +} + +/** Retrieve a vector store file batch. 
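 *
 * A hypothetical polling sketch (IDs are illustrative; production code would add backoff and a timeout):
 * ```ts
 * let batch = await getVectorStoreFileBatch(context, "vs_abc123", "vsfb_abc123");
 * while (batch.status === "in_progress") {
 *   await new Promise((resolve) => setTimeout(resolve, 1000));
 *   batch = await getVectorStoreFileBatch(context, "vs_abc123", "vsfb_abc123");
 * }
 * ```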
*/ +export async function getVectorStoreFileBatch( + context: Client, + vectorStoreId: string, + batchId: string, + options: GetVectorStoreFileBatchOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _getVectorStoreFileBatchSend( + context, + vectorStoreId, + batchId, + options, + ); + return _getVectorStoreFileBatchDeserialize(result); +} + +export function _cancelVectorStoreFileBatchSend( + context: Client, + vectorStoreId: string, + batchId: string, + options: CancelVectorStoreFileBatchOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path( + "/vector_stores/{vectorStoreId}/file_batches/{batchId}/cancel", + vectorStoreId, + batchId, + ) + .post({ ...operationOptionsToRequestParameters(options) }); +} + +export async function _cancelVectorStoreFileBatchDeserialize( + result: CancelVectorStoreFileBatch200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + id: result.body["id"], + object: result.body["object"], + createdAt: new Date(result.body["created_at"]), + vectorStoreId: result.body["vector_store_id"], + status: result.body["status"] as VectorStoreFileBatchStatus, + fileCounts: { + inProgress: result.body.file_counts["in_progress"], + completed: result.body.file_counts["completed"], + failed: result.body.file_counts["failed"], + cancelled: result.body.file_counts["cancelled"], + total: result.body.file_counts["total"], + }, + }; +} + +/** Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible. */ +export async function cancelVectorStoreFileBatch( + context: Client, + vectorStoreId: string, + batchId: string, + options: CancelVectorStoreFileBatchOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _cancelVectorStoreFileBatchSend( + context, + vectorStoreId, + batchId, + options, + ); + return _cancelVectorStoreFileBatchDeserialize(result); +} + +export function _listVectorStoreFileBatchFilesSend( + context: Client, + vectorStoreId: string, + batchId: string, + options: ListVectorStoreFileBatchFilesOptionalParams = { requestOptions: {} }, +): StreamableMethod { + return context + .path( + "/vector_stores/{vectorStoreId}/file_batches/{batchId}/files", + vectorStoreId, + batchId, + ) + .get({ + ...operationOptionsToRequestParameters(options), + queryParameters: { + filter: options?.filter, + limit: options?.limit, + order: options?.order, + after: options?.after, + before: options?.before, + }, + }); +} + +export async function _listVectorStoreFileBatchFilesDeserialize( + result: ListVectorStoreFileBatchFiles200Response, +): Promise { + if (result.status !== "200") { + throw createRestError(result); + } + + return { + object: result.body["object"], + data: result.body["data"].map((p) => ({ + id: p["id"], + object: p["object"], + usageBytes: p["usage_bytes"], + createdAt: new Date(p["created_at"]), + vectorStoreId: p["vector_store_id"], + status: p["status"] as VectorStoreFileStatus, + lastError: + p.last_error === null + ? null + : { + code: p.last_error["code"] as VectorStoreFileErrorCode, + message: p.last_error["message"], + }, + })), + firstId: result.body["first_id"], + lastId: result.body["last_id"], + hasMore: result.body["has_more"], + }; +} + +/** Returns a list of vector store files in a batch. 
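 *
 * A hypothetical pagination sketch (IDs are illustrative; `after` cursors through the list):
 * ```ts
 * const page = await listVectorStoreFileBatchFiles(context, "vs_abc123", "vsfb_abc123", { limit: 20 });
 * if (page.hasMore) {
 *   // request the next page by passing { after: page.lastId }
 * }
 * ```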
*/ +export async function listVectorStoreFileBatchFiles( + context: Client, + vectorStoreId: string, + batchId: string, + options: ListVectorStoreFileBatchFilesOptionalParams = { requestOptions: {} }, +): Promise { + const result = await _listVectorStoreFileBatchFilesSend( + context, + vectorStoreId, + batchId, + options, + ); + return _listVectorStoreFileBatchFilesDeserialize(result); } diff --git a/sdk/openai/openai/src/api/policies/nonAzure.ts b/sdk/openai/openai/src/api/policies/nonAzure.ts deleted file mode 100644 index 34944e9d0907..000000000000 --- a/sdk/openai/openai/src/api/policies/nonAzure.ts +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { PipelinePolicy } from "@azure/core-rest-pipeline"; - -export function nonAzurePolicy(): PipelinePolicy { - const policy: PipelinePolicy = { - name: "openAiEndpoint", - sendRequest: (request, next) => { - const obj = new URL(request.url); - const parts = obj.pathname.split("/"); - switch (parts[parts.length - 1]) { - case "completions": - if (parts[parts.length - 2] === "chat") { - obj.pathname = `${parts[1]}/chat/completions`; - } else { - obj.pathname = `${parts[1]}/completions`; - } - break; - case "embeddings": - obj.pathname = `${parts[1]}/embeddings`; - break; - case "generations": - if (parts[parts.length - 2] === "images") { - obj.pathname = `${parts[1]}/images/generations`; - } else { - throw new Error("Unexpected path"); - } - break; - case "transcriptions": - obj.pathname = `${parts[1]}/audio/transcriptions`; - break; - case "translations": - obj.pathname = `${parts[1]}/audio/translations`; - break; - } - obj.searchParams.delete("api-version"); - request.url = obj.toString(); - return next(request); - }, - }; - return policy; -} diff --git a/sdk/openai/openai/src/api/readableStreamUtils.ts b/sdk/openai/openai/src/api/readableStreamUtils.ts deleted file mode 100644 index d67982579af3..000000000000 --- a/sdk/openai/openai/src/api/readableStreamUtils.ts +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. 
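The deleted readableStreamUtils.ts below patched async iteration onto web `ReadableStream` objects, which not all runtimes supported natively. A minimal sketch of how a polyfilled stream was typically consumed (the `stream` value and the consumer function are assumed, not from this diff):

```ts
// Hypothetical consumer: counts the bytes in a polyfilled ReadableStream<Uint8Array>.
async function countBytes(
  stream: ReadableStream<Uint8Array> & AsyncIterable<Uint8Array>,
): Promise<number> {
  let total = 0;
  for await (const chunk of stream) {
    total += chunk.length; // each chunk is a Uint8Array
  }
  return total;
}
```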
- -export function polyfillStream(stream: ReadableStream): ReadableStream & AsyncIterable { - makeAsyncIterable(stream); - return stream; -} - -function makeAsyncIterable( - webStream: any, -): asserts webStream is ReadableStream & AsyncIterable { - if (!webStream[Symbol.asyncIterator]) { - webStream[Symbol.asyncIterator] = () => toAsyncIterable(webStream); - } - - if (!webStream.values) { - webStream.values = () => toAsyncIterable(webStream); - } -} - -async function* toAsyncIterable(stream: ReadableStream): AsyncIterableIterator { - const reader = stream.getReader(); - try { - while (true) { - const { value, done } = await reader.read(); - if (done) { - return; - } - yield value; - } - } finally { - const cancelPromise = reader.cancel(); - reader.releaseLock(); - await cancelPromise; - } -} - -export async function streamToText(stream: ReadableStream): Promise { - const reader = stream.getReader(); - const buffers: Uint8Array[] = []; - let length = 0; - try { - // eslint-disable-next-line no-constant-condition - while (true) { - const { value, done } = await reader.read(); - if (done) { - return new TextDecoder().decode(concatBuffers(buffers, length)); - } - length += value.length; - buffers.push(value); - } - } finally { - reader.releaseLock(); - } -} - -function getBuffersLength(buffers: Uint8Array[]): number { - return buffers.reduce((acc, curr) => acc + curr.length, 0); -} - -function concatBuffers(buffers: Uint8Array[], len?: number): Uint8Array { - const length = len ?? getBuffersLength(buffers); - const res = new Uint8Array(length); - for (let i = 0, pos = 0; i < buffers.length; i++) { - const buffer = buffers[i]; - res.set(buffer, pos); - pos += buffer.length; - } - - return res; -} diff --git a/sdk/openai/openai/src/api/util.ts b/sdk/openai/openai/src/api/util.ts deleted file mode 100644 index ad73e051d52a..000000000000 --- a/sdk/openai/openai/src/api/util.ts +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { PathUncheckedResponse } from "@azure-rest/core-client"; -import { OpenAIError } from "../models/models.js"; -import { createHttpHeaders } from "@azure/core-rest-pipeline"; -import { isError } from "@azure/core-util"; - -type CamelCase = S extends `${infer P1}_${infer P2}` - ? `${Lowercase}${Capitalize>}` - : Lowercase; -type SnakeCase = S extends `${infer T}${infer U}` - ? `${T extends Capitalize ? "_" : ""}${Lowercase}${SnakeCase}` - : S; - -type MapCamelCaseKeysOverCollections = - T extends Array ? Array> : CamelCaseKeys; -type MapSnakeCaseKeysOverCollections = - T extends Array - ? Array> - : // : T extends (infer X | infer Y) - // ? MapSnakeCaseKeysOverCollections | MapSnakeCaseKeysOverCollections - SnakeCaseKeys; -type CamelCaseKeys = { - [K in keyof T as CamelCase]: MapCamelCaseKeysOverCollections; -}; -export type SnakeCaseKeys = { - [K in keyof T as SnakeCase]: MapSnakeCaseKeysOverCollections; -}; - -export function wrapError(f: () => T, message: string): T { - try { - const result = f(); - return result; - } catch (cause) { - throw new Error(`${message}: ${cause}`, { cause }); - } -} - -export function camelCaseKeys>(obj: O): CamelCaseKeys { - if (typeof obj !== "object" || !obj) return obj; - if (Array.isArray(obj)) { - return obj.map((v) => - camelCaseKeys ? (X extends Record ? 
X : never) : never>( - v, - ), - ) as CamelCaseKeys; - } else { - for (const key of Object.keys(obj)) { - const value = obj[key]; - const newKey = tocamelCase(key); - if (newKey !== key) { - delete obj[key]; - } - (obj[newKey] as Record) = - typeof obj[newKey] === "object" ? camelCaseKeys(value) : value; - } - return obj; - } -} - -export function snakeCaseKeys>(obj: O): SnakeCaseKeys { - if (typeof obj !== "object" || !obj) return obj; - if (Array.isArray(obj)) { - return obj.map((v) => - snakeCaseKeys ? (X extends Record ? X : never) : never>( - v, - ), - ) as SnakeCaseKeys; - } else { - for (const key of Object.keys(obj)) { - const value = obj[key]; - const newKey = toSnakeCase(key); - if (newKey !== key) { - delete obj[key]; - } - (obj[newKey] as Record) = - typeof obj[newKey] === "object" ? snakeCaseKeys(value) : value; - } - return obj; - } -} - -function tocamelCase
<P extends string>(str: P): CamelCase<P> { - return str - .toLowerCase() - .replace(/([_][a-z])/g, (group) => group.toUpperCase().replace("_", "")) as CamelCase<P>; -} - -function toSnakeCase<P extends string>(str: P): SnakeCase<P> { - return str - .replace(/([A-Z])/g, (group) => `_${group.toLowerCase()}`) - .replace(/^_/, "") as SnakeCase<P>
; -} - -function statusCodeToNumber(statusCode: string): number | undefined { - const status = Number.parseInt(statusCode); - - return Number.isNaN(status) ? undefined : status; -} - -export function createOpenAIError(result: PathUncheckedResponse): OpenAIError { - const err = result.body.error ?? result.body; - if (!err) { - throw new Error("An error response has been received but can't be parsed"); - } - const statusCode = statusCodeToNumber(result.status); - return new OpenAIError(err.message, err.param, err.type, { - code: err.code, - statusCode, - response: { - headers: createHttpHeaders(result.headers), - request: result.request, - status: statusCode ?? -1, - }, - }); -} - -/** - * Typeguard for RestError - * @param e - Something caught by a catch clause. - */ -export function isOpenAIError(e: unknown): e is OpenAIError { - if (e instanceof OpenAIError) { - return true; - } - return isError(e) && e.name === "OpenAIError"; -} diff --git a/sdk/openai/openai/src/assistantsClient.ts b/sdk/openai/openai/src/assistantsClient.ts new file mode 100644 index 000000000000..4873a1006997 --- /dev/null +++ b/sdk/openai/openai/src/assistantsClient.ts @@ -0,0 +1,515 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { TokenCredential, KeyCredential } from "@azure/core-auth"; +import { Pipeline } from "@azure/core-rest-pipeline"; +import { + AssistantCreationOptions, + Assistant, + OpenAIPageableListOfAssistant, + UpdateAssistantOptions, + AssistantDeletionStatus, + AssistantThreadCreationOptions, + ThreadMessageOptions, + AssistantThread, + UpdateAssistantThreadOptions, + ThreadDeletionStatus, + ThreadMessage, + OpenAIPageableListOfThreadMessage, + CreateRunOptions, + ThreadRun, + OpenAIPageableListOfThreadRun, + ToolOutput, + CreateAndRunThreadOptions, + RunStep, + OpenAIPageableListOfRunStep, + FilePurpose, + FileListResponse, + OpenAIFile, + FileDeletionStatus, + OpenAIPageableListOfVectorStore, + VectorStore, + VectorStoreOptions, + VectorStoreUpdateOptions, + VectorStoreDeletionStatus, + OpenAIPageableListOfVectorStoreFile, + VectorStoreFile, + VectorStoreFileDeletionStatus, + VectorStoreFileBatch, +} from "./models/models.js"; +import { + CreateAssistantOptionalParams, + ListAssistantsOptionalParams, + GetAssistantOptionalParams, + UpdateAssistantOptionalParams, + DeleteAssistantOptionalParams, + CreateThreadOptionalParams, + GetThreadOptionalParams, + UpdateThreadOptionalParams, + DeleteThreadOptionalParams, + CreateMessageOptionalParams, + ListMessagesOptionalParams, + GetMessageOptionalParams, + UpdateMessageOptionalParams, + CreateRunOptionalParams, + ListRunsOptionalParams, + GetRunOptionalParams, + UpdateRunOptionalParams, + SubmitToolOutputsToRunOptionalParams, + CancelRunOptionalParams, + CreateThreadAndRunOptionalParams, + GetRunStepOptionalParams, + ListRunStepsOptionalParams, + ListFilesOptionalParams, + UploadFileOptionalParams, + DeleteFileOptionalParams, + GetFileOptionalParams, + GetFileContentOptionalParams, + ListVectorStoresOptionalParams, + CreateVectorStoreOptionalParams, + GetVectorStoreOptionalParams, + ModifyVectorStoreOptionalParams, + DeleteVectorStoreOptionalParams, + ListVectorStoreFilesOptionalParams, + CreateVectorStoreFileOptionalParams, + GetVectorStoreFileOptionalParams, + DeleteVectorStoreFileOptionalParams, + CreateVectorStoreFileBatchOptionalParams, + GetVectorStoreFileBatchOptionalParams, + CancelVectorStoreFileBatchOptionalParams, + ListVectorStoreFileBatchFilesOptionalParams, +} from "./models/options.js"; +import { + 
createAssistants, + AssistantsClientOptions, + AssistantsContext, + createAssistant, + listAssistants, + getAssistant, + updateAssistant, + deleteAssistant, + createThread, + getThread, + updateThread, + deleteThread, + createMessage, + listMessages, + getMessage, + updateMessage, + createRun, + listRuns, + getRun, + updateRun, + submitToolOutputsToRun, + cancelRun, + createThreadAndRun, + getRunStep, + listRunSteps, + listFiles, + uploadFile, + deleteFile, + getFile, + getFileContent, + listVectorStores, + createVectorStore, + getVectorStore, + modifyVectorStore, + deleteVectorStore, + listVectorStoreFiles, + createVectorStoreFile, + getVectorStoreFile, + deleteVectorStoreFile, + createVectorStoreFileBatch, + getVectorStoreFileBatch, + cancelVectorStoreFileBatch, + listVectorStoreFileBatchFiles, +} from "./api/index.js"; + +export { AssistantsClientOptions } from "./api/assistantsContext.js"; + +export class AssistantsClient { + private _client: AssistantsContext; + /** The pipeline used by this client to make requests */ + public readonly pipeline: Pipeline; + + /** Azure OpenAI APIs for Assistants. */ + constructor( + endpointParam: string, + credential: KeyCredential | TokenCredential, + options: AssistantsClientOptions = {}, + ) { + this._client = createAssistants(endpointParam, credential, options); + this.pipeline = this._client.pipeline; + } + + /** Creates a new assistant. */ + createAssistant( + body: AssistantCreationOptions, + options: CreateAssistantOptionalParams = { requestOptions: {} }, + ): Promise { + return createAssistant(this._client, body, options); + } + + /** Gets a list of assistants that were previously created. */ + listAssistants( + options: ListAssistantsOptionalParams = { requestOptions: {} }, + ): Promise { + return listAssistants(this._client, options); + } + + /** Retrieves an existing assistant. */ + getAssistant( + assistantId: string, + options: GetAssistantOptionalParams = { requestOptions: {} }, + ): Promise { + return getAssistant(this._client, assistantId, options); + } + + /** Modifies an existing assistant. */ + updateAssistant( + assistantId: string, + body: UpdateAssistantOptions, + options: UpdateAssistantOptionalParams = { requestOptions: {} }, + ): Promise { + return updateAssistant(this._client, assistantId, body, options); + } + + /** Deletes an assistant. */ + deleteAssistant( + assistantId: string, + options: DeleteAssistantOptionalParams = { requestOptions: {} }, + ): Promise { + return deleteAssistant(this._client, assistantId, options); + } + + /** Creates a new thread. Threads contain messages and can be run by assistants. */ + createThread( + body: AssistantThreadCreationOptions, + options: CreateThreadOptionalParams = { requestOptions: {} }, + ): Promise { + return createThread(this._client, body, options); + } + + /** Gets information about an existing thread. */ + getThread( + threadId: string, + options: GetThreadOptionalParams = { requestOptions: {} }, + ): Promise { + return getThread(this._client, threadId, options); + } + + /** Modifies an existing thread. */ + updateThread( + threadId: string, + body: UpdateAssistantThreadOptions, + options: UpdateThreadOptionalParams = { requestOptions: {} }, + ): Promise { + return updateThread(this._client, threadId, body, options); + } + + /** Deletes an existing thread. 
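 *
 * A hypothetical sketch (the client and thread ID are illustrative):
 * ```ts
 * const status = await client.deleteThread("thread_abc123");
 * console.log(status.deleted); // true once the thread is gone
 * ```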
*/ + deleteThread( + threadId: string, + options: DeleteThreadOptionalParams = { requestOptions: {} }, + ): Promise { + return deleteThread(this._client, threadId, options); + } + + /** Creates a new message on a specified thread. */ + createMessage( + threadId: string, + threadMessageOptions: ThreadMessageOptions, + options: CreateMessageOptionalParams = { requestOptions: {} }, + ): Promise { + return createMessage(this._client, threadId, threadMessageOptions, options); + } + + /** Gets a list of messages that exist on a thread. */ + listMessages( + threadId: string, + options: ListMessagesOptionalParams = { requestOptions: {} }, + ): Promise { + return listMessages(this._client, threadId, options); + } + + /** Gets an existing message from an existing thread. */ + getMessage( + threadId: string, + messageId: string, + options: GetMessageOptionalParams = { requestOptions: {} }, + ): Promise { + return getMessage(this._client, threadId, messageId, options); + } + + /** Modifies an existing message on an existing thread. */ + updateMessage( + threadId: string, + messageId: string, + options: UpdateMessageOptionalParams = { requestOptions: {} }, + ): Promise { + return updateMessage(this._client, threadId, messageId, options); + } + + /** Creates a new run for an assistant thread. */ + createRun( + threadId: string, + createRunOptions: CreateRunOptions, + options: CreateRunOptionalParams = { requestOptions: {} }, + ): Promise { + return createRun(this._client, threadId, createRunOptions, options); + } + + /** Gets a list of runs for a specified thread. */ + listRuns( + threadId: string, + options: ListRunsOptionalParams = { requestOptions: {} }, + ): Promise { + return listRuns(this._client, threadId, options); + } + + /** Gets an existing run from an existing thread. */ + getRun( + threadId: string, + runId: string, + options: GetRunOptionalParams = { requestOptions: {} }, + ): Promise { + return getRun(this._client, threadId, runId, options); + } + + /** Modifies an existing thread run. */ + updateRun( + threadId: string, + runId: string, + options: UpdateRunOptionalParams = { requestOptions: {} }, + ): Promise { + return updateRun(this._client, threadId, runId, options); + } + + /** Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. */ + submitToolOutputsToRun( + threadId: string, + runId: string, + toolOutputs: ToolOutput[], + options: SubmitToolOutputsToRunOptionalParams = { requestOptions: {} }, + ): Promise { + return submitToolOutputsToRun( + this._client, + threadId, + runId, + toolOutputs, + options, + ); + } + + /** Cancels a run of an in progress thread. */ + cancelRun( + threadId: string, + runId: string, + options: CancelRunOptionalParams = { requestOptions: {} }, + ): Promise { + return cancelRun(this._client, threadId, runId, options); + } + + /** Creates a new assistant thread and immediately starts a run using that new thread. */ + createThreadAndRun( + body: CreateAndRunThreadOptions, + options: CreateThreadAndRunOptionalParams = { requestOptions: {} }, + ): Promise { + return createThreadAndRun(this._client, body, options); + } + + /** Gets a single run step from a thread run. 
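 *
 * A hypothetical sketch (IDs are illustrative):
 * ```ts
 * const step = await client.getRunStep("thread_abc123", "run_abc123", "step_abc123");
 * console.log(step.type); // "message_creation" or "tool_calls"
 * ```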
*/ + getRunStep( + threadId: string, + runId: string, + stepId: string, + options: GetRunStepOptionalParams = { requestOptions: {} }, + ): Promise { + return getRunStep(this._client, threadId, runId, stepId, options); + } + + /** Gets a list of run steps from a thread run. */ + listRunSteps( + threadId: string, + runId: string, + options: ListRunStepsOptionalParams = { requestOptions: {} }, + ): Promise { + return listRunSteps(this._client, threadId, runId, options); + } + + /** Gets a list of previously uploaded files. */ + listFiles( + options: ListFilesOptionalParams = { requestOptions: {} }, + ): Promise { + return listFiles(this._client, options); + } + + /** Uploads a file for use by other operations. */ + uploadFile( + file: Uint8Array, + purpose: FilePurpose, + options: UploadFileOptionalParams = { requestOptions: {} }, + ): Promise { + return uploadFile(this._client, file, purpose, options); + } + + /** Delete a previously uploaded file. */ + deleteFile( + fileId: string, + options: DeleteFileOptionalParams = { requestOptions: {} }, + ): Promise { + return deleteFile(this._client, fileId, options); + } + + /** Returns information about a specific file. Does not retrieve file content. */ + getFile( + fileId: string, + options: GetFileOptionalParams = { requestOptions: {} }, + ): Promise { + return getFile(this._client, fileId, options); + } + + /** Returns the content of a specific file. */ + getFileContent( + fileId: string, + options: GetFileContentOptionalParams = { requestOptions: {} }, + ): Promise { + return getFileContent(this._client, fileId, options); + } + + /** Returns a list of vector stores. */ + listVectorStores( + options: ListVectorStoresOptionalParams = { requestOptions: {} }, + ): Promise { + return listVectorStores(this._client, options); + } + + /** Creates a vector store. */ + createVectorStore( + body: VectorStoreOptions, + options: CreateVectorStoreOptionalParams = { requestOptions: {} }, + ): Promise { + return createVectorStore(this._client, body, options); + } + + /** Returns the vector store object matching the specified ID. */ + getVectorStore( + vectorStoreId: string, + options: GetVectorStoreOptionalParams = { requestOptions: {} }, + ): Promise { + return getVectorStore(this._client, vectorStoreId, options); + } + + /** Modifies an existing vector store. */ + modifyVectorStore( + vectorStoreId: string, + body: VectorStoreUpdateOptions, + options: ModifyVectorStoreOptionalParams = { requestOptions: {} }, + ): Promise { + return modifyVectorStore(this._client, vectorStoreId, body, options); + } + + /** Deletes the vector store object matching the specified ID. */ + deleteVectorStore( + vectorStoreId: string, + options: DeleteVectorStoreOptionalParams = { requestOptions: {} }, + ): Promise { + return deleteVectorStore(this._client, vectorStoreId, options); + } + + /** Returns a list of vector store files. */ + listVectorStoreFiles( + vectorStoreId: string, + options: ListVectorStoreFilesOptionalParams = { requestOptions: {} }, + ): Promise { + return listVectorStoreFiles(this._client, vectorStoreId, options); + } + + /** Create a vector store file by attaching a file to a vector store. */ + createVectorStoreFile( + vectorStoreId: string, + fileId: string, + options: CreateVectorStoreFileOptionalParams = { requestOptions: {} }, + ): Promise { + return createVectorStoreFile(this._client, vectorStoreId, fileId, options); + } + + /** Retrieves a vector store file. 
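 *
 * A hypothetical end-to-end sketch (endpoint, key, and IDs are illustrative):
 * ```ts
 * import { AzureKeyCredential } from "@azure/core-auth";
 * const client = new AssistantsClient("https://example.openai.azure.com", new AzureKeyCredential("<api key>"));
 * const vsFile = await client.getVectorStoreFile("vs_abc123", "file_abc123");
 * ```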
*/ + getVectorStoreFile( + vectorStoreId: string, + fileId: string, + options: GetVectorStoreFileOptionalParams = { requestOptions: {} }, + ): Promise { + return getVectorStoreFile(this._client, vectorStoreId, fileId, options); + } + + /** + * Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. + * To delete the file, use the delete file endpoint. + */ + deleteVectorStoreFile( + vectorStoreId: string, + fileId: string, + options: DeleteVectorStoreFileOptionalParams = { requestOptions: {} }, + ): Promise { + return deleteVectorStoreFile(this._client, vectorStoreId, fileId, options); + } + + /** Create a vector store file batch. */ + createVectorStoreFileBatch( + vectorStoreId: string, + fileIds: string[], + options: CreateVectorStoreFileBatchOptionalParams = { requestOptions: {} }, + ): Promise { + return createVectorStoreFileBatch( + this._client, + vectorStoreId, + fileIds, + options, + ); + } + + /** Retrieve a vector store file batch. */ + getVectorStoreFileBatch( + vectorStoreId: string, + batchId: string, + options: GetVectorStoreFileBatchOptionalParams = { requestOptions: {} }, + ): Promise { + return getVectorStoreFileBatch( + this._client, + vectorStoreId, + batchId, + options, + ); + } + + /** Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible. */ + cancelVectorStoreFileBatch( + vectorStoreId: string, + batchId: string, + options: CancelVectorStoreFileBatchOptionalParams = { requestOptions: {} }, + ): Promise { + return cancelVectorStoreFileBatch( + this._client, + vectorStoreId, + batchId, + options, + ); + } + + /** Returns a list of vector store files in a batch. */ + listVectorStoreFileBatchFiles( + vectorStoreId: string, + batchId: string, + options: ListVectorStoreFileBatchFilesOptionalParams = { + requestOptions: {}, + }, + ): Promise { + return listVectorStoreFileBatchFiles( + this._client, + vectorStoreId, + batchId, + options, + ); + } +} diff --git a/sdk/openai/openai/src/helpers/serializerHelpers.ts b/sdk/openai/openai/src/helpers/serializerHelpers.ts new file mode 100644 index 000000000000..332381cdb695 --- /dev/null +++ b/sdk/openai/openai/src/helpers/serializerHelpers.ts @@ -0,0 +1,42 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export function serializeRecord< + T extends string | number | boolean | Date | null, + R, +>(item: Record): Record; +export function serializeRecord( + item: Record, + serializer: (item: T) => R, +): Record; +export function serializeRecord( + item: Record, + serializer?: (item: T) => R, +): Record { + return Object.keys(item).reduce( + (acc, key) => { + if (isSupportedRecordType(item[key])) { + acc[key] = item[key] as any; + } else if (serializer) { + const value = item[key]; + if (value !== undefined) { + acc[key] = serializer(value); + } + } else { + console.warn(`Don't know how to serialize ${item[key]}`); + acc[key] = item[key] as any; + } + return acc; + }, + {} as Record, + ); +} + +function isSupportedRecordType(t: any) { + return ( + // `typeof null` is "object", so null needs its own check + t === null || + ["number", "string", "boolean"].includes(typeof t) || + t instanceof Date + ); +} diff --git a/sdk/openai/openai/src/index.ts b/sdk/openai/openai/src/index.ts index 954f16312229..feef23681265 100644 --- a/sdk/openai/openai/src/index.ts +++ b/sdk/openai/openai/src/index.ts @@ -1,138 +1,209 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
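The `serializeRecord` helper added above passes primitive and `Date` values through unchanged and delegates everything else to a caller-supplied serializer. A minimal sketch of both overloads (the record values are illustrative; the relative import path assumes a caller inside `src`):

```ts
import { serializeRecord } from "./helpers/serializerHelpers.js";

// Overload 1: primitive values pass through unchanged.
const metadata = serializeRecord({ project: "demo", priority: "1" });

// Overload 2: non-primitive values go through the provided serializer.
const tags = serializeRecord({ created: { year: 2024 } }, (v) => JSON.stringify(v));
```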
-/** * - * [Azure OpenAI](https://learn.microsoft.com/azure/cognitive-services/openai/overview) - * provides REST API access to OpenAI's powerful language models including the GPT-3, - * Codex and Embeddings model series. In addition, the new GPT-4 and ChatGPT (gpt-35-turbo) - * model series have now reached general availability. These models can be easily adapted - * to your specific task including but not limited to content generation, summarization, - * semantic search, and natural language to code translation. - * - * @packageDocumentation - */ - -export { AzureKeyCredential } from "@azure/core-auth"; -export { OpenAIClient, OpenAIClientOptions } from "./OpenAIClient.js"; -export { OpenAIKeyCredential } from "./OpenAIKeyCredential.js"; -export * from "./models/audio.js"; export { - AzureChatExtensionConfigurationUnion, - AzureExtensionsOptions, - Completions, - ContentFilterResultsForPrompt, - ContentFilterResultDetailsForPrompt, - ContentFilterResult, - ContentFilterSeverity, - ContentFilterDetectionResult, - ContentFilterBlocklistIdResult, - Choice, - ContentFilterResultsForChoice, - ContentFilterSuccessResultDetailsForPrompt, - ContentFilterErrorResults, - ContentFilterSuccessResultsForChoice, - ContentFilterCitedDetectionResult, - CompletionsLogProbabilityModel, - CompletionsFinishReason, - CompletionsUsage, - ChatRequestMessage, - ChatRequestSystemMessage, - ChatRequestUserMessage, - ChatMessageContentItem, - ChatMessageTextContentItem, - ChatMessageImageContentItem, - ChatMessageImageUrl, - ChatMessageImageDetailLevel, - ChatRequestAssistantMessage, - ChatCompletionsToolCall, - ChatCompletionsFunctionToolCall, - FunctionCall, - ChatRequestToolMessage, - ChatRequestFunctionMessage, - ChatRole, + AssistantsClient, + AssistantsClientOptions, +} from "./assistantsClient.js"; +export { + AssistantCreationOptions, + ToolDefinition, + CodeInterpreterToolDefinition, + FileSearchToolDefinition, + FunctionToolDefinition, FunctionDefinition, - FunctionCallPreset, + CreateToolResourcesOptions, + CreateCodeInterpreterToolResourceOptions, + CreateFileSearchToolResourceVectorStoreOptions, + AssistantsApiResponseFormatMode, + AssistantsApiResponseFormat, + ApiResponseFormat, + Assistant, + ToolResources, + CodeInterpreterToolResource, + FileSearchToolResource, + ListSortOrder, + OpenAIPageableListOfAssistant, + UpdateAssistantOptions, + UpdateToolResourcesOptions, + UpdateCodeInterpreterToolResourceOptions, + UpdateFileSearchToolResourceOptions, + AssistantDeletionStatus, + AssistantThreadCreationOptions, + ThreadMessageOptions, + MessageRole, + MessageAttachment, + AssistantThread, + UpdateAssistantThreadOptions, + ThreadDeletionStatus, + ThreadMessage, + MessageStatus, + MessageIncompleteDetails, + MessageIncompleteDetailsReason, + MessageContent, + MessageTextContent, + MessageTextDetails, + MessageTextAnnotation, + MessageTextFileCitationAnnotation, + MessageTextFileCitationDetails, + MessageTextFilePathAnnotation, + MessageTextFilePathDetails, + MessageImageFileContent, + MessageImageFileDetails, + OpenAIPageableListOfThreadMessage, + CreateRunOptions, + TruncationObject, + TruncationStrategy, + AssistantsApiToolChoiceOptionMode, + AssistantsNamedToolChoice, + AssistantsNamedToolChoiceType, FunctionName, - AzureChatExtensionConfiguration, - AzureSearchChatExtensionConfiguration, - OnYourDataAuthenticationOptions, - OnYourDataApiKeyAuthenticationOptions, - OnYourDataConnectionStringAuthenticationOptions, - OnYourDataKeyAndKeyIdAuthenticationOptions, - 
OnYourDataEncodedApiKeyAuthenticationOptions, - OnYourDataAccessTokenAuthenticationOptions, - OnYourDataSystemAssignedManagedIdentityAuthenticationOptions, - OnYourDataUserAssignedManagedIdentityAuthenticationOptions, - OnYourDataAuthenticationType, - AzureSearchIndexFieldMappingOptions, - AzureSearchQueryType, - OnYourDataVectorizationSource, - OnYourDataEndpointVectorizationSource, - OnYourDataDeploymentNameVectorizationSource, - OnYourDataModelIdVectorizationSource, - OnYourDataVectorizationSourceType, - AzureMachineLearningIndexChatExtensionConfiguration, - AzureCosmosDBChatExtensionConfiguration, - AzureCosmosDBFieldMappingOptions, - ElasticsearchChatExtensionConfiguration, - ElasticsearchIndexFieldMappingOptions, - ElasticsearchQueryType, - PineconeChatExtensionConfiguration, - PineconeFieldMappingOptions, - AzureChatExtensionType, - AzureChatEnhancementConfiguration, - AzureChatGroundingEnhancementConfiguration, - AzureChatOCREnhancementConfiguration, - ChatCompletionsResponseFormat, - ChatCompletionsTextResponseFormat, - ChatCompletionsJsonResponseFormat, - ChatCompletionsToolDefinition, - ChatCompletionsFunctionToolDefinition, - ChatCompletionsToolSelectionPreset, - ChatCompletionsNamedToolSelection, - ChatCompletionsNamedFunctionToolSelection, - ChatCompletionsFunctionToolSelection, - ChatCompletions, - ChatChoice, - ChatResponseMessage, - AzureChatExtensionsMessageContext, - AzureChatExtensionDataSourceResponseCitation, - ChatChoiceLogProbabilityInfo, - ChatTokenLogProbabilityResult, - ChatTokenLogProbabilityInfo, - ChatFinishDetails, - StopFinishDetails, - MaxTokensFinishDetails, - AzureChatEnhancements, - AzureGroundingEnhancement, - AzureGroundingEnhancementLine, - AzureGroundingEnhancementLineSpan, - AzureGroundingEnhancementCoordinatePoint, - ImageSize, - ImageGenerationResponseFormat, - ImageGenerationQuality, - ImageGenerationStyle, - ImageGenerations, - ImageGenerationData, - ImageGenerationContentFilterResults, - ImageGenerationPromptFilterResults, - Embeddings, - EmbeddingItem, - EmbeddingsUsage, - ChatRequestMessageUnion, - ChatMessageContentItemUnion, - ChatCompletionsToolCallUnion, - OnYourDataAuthenticationOptionsUnion, - OnYourDataVectorizationSourceUnion, - ChatCompletionsResponseFormatUnion, - ChatCompletionsToolDefinitionUnion, - ChatCompletionsNamedToolSelectionUnion, - ChatFinishDetailsUnion, - GetCompletionsOptions, - GetChatCompletionsOptions, - GetImagesOptions, - GetEmbeddingsOptions, - EventStream, - OpenAIError, + ThreadRun, + RunStatus, + RequiredAction, + SubmitToolOutputsAction, + SubmitToolOutputsDetails, + RequiredToolCall, + RequiredFunctionToolCall, + RequiredFunctionToolCallDetails, + RunError, + IncompleteRunDetails, + RunCompletionUsage, + OpenAIPageableListOfThreadRun, + ToolOutput, + CreateAndRunThreadOptions, + RunStep, + RunStepType, + RunStepStatus, + RunStepDetails, + RunStepMessageCreationDetails, + RunStepMessageCreationReference, + RunStepToolCallDetails, + RunStepToolCall, + RunStepCodeInterpreterToolCall, + RunStepCodeInterpreterToolCallDetails, + RunStepCodeInterpreterToolCallOutput, + RunStepCodeInterpreterLogOutput, + RunStepCodeInterpreterImageOutput, + RunStepCodeInterpreterImageReference, + RunStepFileSearchToolCall, + RunStepFunctionToolCall, + RunStepFunctionToolCallDetails, + RunStepError, + RunStepErrorCode, + RunStepCompletionUsage, + OpenAIPageableListOfRunStep, + FilePurpose, + FileListResponse, + OpenAIFile, + FileState, + FileDeletionStatus, + OpenAIPageableListOfVectorStore, + VectorStore, + VectorStoreFileCount, + 
VectorStoreStatus, + VectorStoreExpirationPolicy, + VectorStoreExpirationPolicyAnchor, + VectorStoreOptions, + VectorStoreUpdateOptions, + VectorStoreDeletionStatus, + VectorStoreFileStatusFilter, + OpenAIPageableListOfVectorStoreFile, + VectorStoreFile, + VectorStoreFileStatus, + VectorStoreFileError, + VectorStoreFileErrorCode, + VectorStoreFileDeletionStatus, + VectorStoreFileBatch, + VectorStoreFileBatchStatus, + MessageDeltaChunk, + MessageDelta, + MessageDeltaContent, + MessageDeltaImageFileContent, + MessageDeltaImageFileContentObject, + MessageDeltaTextContentObject, + MessageDeltaTextContent, + MessageDeltaTextAnnotation, + MessageDeltaTextFileCitationAnnotationObject, + MessageDeltaTextFileCitationAnnotation, + MessageDeltaTextFilePathAnnotationObject, + MessageDeltaTextFilePathAnnotation, + RunStepDeltaChunk, + RunStepDelta, + RunStepDeltaDetail, + RunStepDeltaMessageCreation, + RunStepDeltaMessageCreationObject, + RunStepDeltaToolCallObject, + RunStepDeltaToolCall, + RunStepDeltaFunctionToolCall, + RunStepDeltaFunction, + RunStepDeltaFileSearchToolCall, + RunStepDeltaCodeInterpreterToolCall, + RunStepDeltaCodeInterpreterDetailItemObject, + RunStepDeltaCodeInterpreterOutput, + RunStepDeltaCodeInterpreterLogOutput, + RunStepDeltaCodeInterpreterImageOutput, + RunStepDeltaCodeInterpreterImageOutputObject, + ThreadStreamEvent, + RunStreamEvent, + RunStepStreamEvent, + MessageStreamEvent, + ErrorEvent, + DoneEvent, + ServiceApiVersions, + ToolDefinitionUnion, + CreateFileSearchToolResourceOptions, + MessageAttachmentToolDefinition, + MessageContentUnion, + MessageTextAnnotationUnion, + RequiredActionUnion, + RequiredToolCallUnion, + RunStepDetailsUnion, + RunStepToolCallUnion, + RunStepCodeInterpreterToolCallOutputUnion, + MessageDeltaContentUnion, + MessageDeltaTextAnnotationUnion, + RunStepDeltaDetailUnion, + RunStepDeltaToolCallUnion, + RunStepDeltaCodeInterpreterOutputUnion, + CreateAssistantOptionalParams, + ListAssistantsOptionalParams, + GetAssistantOptionalParams, + UpdateAssistantOptionalParams, + DeleteAssistantOptionalParams, + CreateThreadOptionalParams, + GetThreadOptionalParams, + UpdateThreadOptionalParams, + DeleteThreadOptionalParams, + CreateMessageOptionalParams, + ListMessagesOptionalParams, + GetMessageOptionalParams, + UpdateMessageOptionalParams, + CreateRunOptionalParams, + ListRunsOptionalParams, + GetRunOptionalParams, + UpdateRunOptionalParams, + SubmitToolOutputsToRunOptionalParams, + CancelRunOptionalParams, + CreateThreadAndRunOptionalParams, + GetRunStepOptionalParams, + ListRunStepsOptionalParams, + ListFilesOptionalParams, + UploadFileOptionalParams, + DeleteFileOptionalParams, + GetFileOptionalParams, + GetFileContentOptionalParams, + ListVectorStoresOptionalParams, + CreateVectorStoreOptionalParams, + GetVectorStoreOptionalParams, + ModifyVectorStoreOptionalParams, + DeleteVectorStoreOptionalParams, + ListVectorStoreFilesOptionalParams, + CreateVectorStoreFileOptionalParams, + GetVectorStoreFileOptionalParams, + DeleteVectorStoreFileOptionalParams, + CreateVectorStoreFileBatchOptionalParams, + GetVectorStoreFileBatchOptionalParams, + CancelVectorStoreFileBatchOptionalParams, + ListVectorStoreFileBatchFilesOptionalParams, } from "./models/index.js"; -export { isOpenAIError } from "./api/index.js"; diff --git a/sdk/openai/openai/src/logger.ts b/sdk/openai/openai/src/logger.ts index 5679314738ce..2dca9f2a0d1a 100644 --- a/sdk/openai/openai/src/logger.ts +++ b/sdk/openai/openai/src/logger.ts @@ -2,4 +2,4 @@ // Licensed under the MIT license. 
import { createClientLogger } from "@azure/logger"; -export const logger = createClientLogger("openai"); +export const logger = createClientLogger("openai-assistants"); diff --git a/sdk/openai/openai/src/models/audio.ts b/sdk/openai/openai/src/models/audio.ts deleted file mode 100644 index 7e405b351c1a..000000000000 --- a/sdk/openai/openai/src/models/audio.ts +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -/** - * THIS IS AN AUTO-GENERATED FILE - DO NOT EDIT! - * - * Any changes you make here may be lost. - * - * If you need to make changes, please do so in the original source file, \{project-root\}/sources/custom - */ - -import { OperationOptions } from "@azure-rest/core-client"; - -/** Simple transcription response */ -export interface AudioResultSimpleJson { - /** Transcribed text. */ - text: string; -} - -/** Transcription response. */ -export interface AudioResultVerboseJson extends AudioResultSimpleJson { - /** Audio transcription task. */ - task: AudioTranscriptionTask; - /** Language detected in the source audio file. */ - language: string; - /** Duration. */ - duration: number; - /** Segments. */ - segments: AudioSegment[]; -} - -/** Transcription segment. */ -export interface AudioSegment { - /** Segment identifier. */ - id: number; - /** Segment start offset. */ - start: number; - /** Segment end offset. */ - end: number; - /** Segment text. */ - text: string; - /** Temperature. */ - temperature: number; - /** Average log probability. */ - avgLogprob: number; - /** Compression ratio. */ - compressionRatio: number; - /** Probability of 'no speech'. */ - noSpeechProb: number; - /** Tokens in this segment */ - tokens: number[]; - /** TODO */ - seek: number; -} - -/** The options for an audio transcription request */ -export interface GetAudioTranscriptionOptions extends OperationOptions { - /** An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language. */ - prompt?: string; - /** - * The sampling temperature, between 0 and 1. - * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - * If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. - */ - temperature?: number; - /** The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency. */ - language?: string; -} - -/** The options for an audio translation request */ -export interface GetAudioTranslationOptions extends OperationOptions { - /** An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language. */ - prompt?: string; - /** - * The sampling temperature, between 0 and 1. - * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - * If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. - */ - temperature?: number; -} - -/** The result format of an audio task */ -export type AudioResultFormat = - | "json" - /** This format will return an JSON structure containing an enriched structure with the transcription. */ - | "verbose_json" - /** This will make the response return the transcription as plain/text. 
*/ - | "text" - /** The transcription will be provided in SRT format (SubRip Text) in the form of plain/text. */ - | "srt" - /** The transcription will be provided in VTT format (Web Video Text Tracks) in the form of plain/text. */ - | "vtt"; -/** Audio transcription task type */ -/** "transcribe", "translate" */ -export type AudioTranscriptionTask = string; -/** The type of the result of the transcription based on the requested response format */ -export type AudioResult = { - json: AudioResultSimpleJson; - verbose_json: AudioResultVerboseJson; - vtt: string; - srt: string; - text: string; -}[ResponseFormat]; diff --git a/sdk/openai/openai/src/models/index.ts b/sdk/openai/openai/src/models/index.ts index e206c42c565e..8f7bea546d7d 100644 --- a/sdk/openai/openai/src/models/index.ts +++ b/sdk/openai/openai/src/models/index.ts @@ -2,135 +2,206 @@ // Licensed under the MIT license. export { - AudioTranscriptionOptions, - AudioTranscriptionFormat, - AudioTranscription, - AudioTaskLabel, - AudioTranscriptionSegment, - AudioTranslationOptions, - AudioTranslationFormat, - AudioTranslation, - AudioTranslationSegment, - AzureExtensionsOptions, - CompletionsOptions, - Completions, - ContentFilterErrorResults, - ContentFilterSuccessResultDetailsForPrompt, - ContentFilterSuccessResultsForChoice, - ContentFilterResultsForPrompt, - ContentFilterResultDetailsForPrompt, - ContentFilterResult, - ContentFilterSeverity, - ContentFilterDetectionResult, - ContentFilterBlocklistIdResult, - Choice, - ContentFilterResultsForChoice, - ContentFilterCitedDetectionResult, - CompletionsLogProbabilityModel, - CompletionsFinishReason, - CompletionsUsage, - ChatRequestMessage, - ChatRequestSystemMessage, - ChatRequestUserMessage, - ChatMessageContentItem, - ChatMessageTextContentItem, - ChatMessageImageContentItem, - ChatMessageImageUrl, - ChatMessageImageDetailLevel, - ChatRequestAssistantMessage, - ChatCompletionsToolCall, - ChatCompletionsFunctionToolCall, - FunctionCall, - ChatRequestToolMessage, - ChatRequestFunctionMessage, - ChatRole, + AssistantCreationOptions, + ToolDefinition, + CodeInterpreterToolDefinition, + FileSearchToolDefinition, + FunctionToolDefinition, FunctionDefinition, - FunctionCallPreset, + CreateToolResourcesOptions, + CreateCodeInterpreterToolResourceOptions, + CreateFileSearchToolResourceVectorStoreOptions, + AssistantsApiResponseFormatMode, + AssistantsApiResponseFormat, + ApiResponseFormat, + Assistant, + ToolResources, + CodeInterpreterToolResource, + FileSearchToolResource, + ListSortOrder, + OpenAIPageableListOfAssistant, + UpdateAssistantOptions, + UpdateToolResourcesOptions, + UpdateCodeInterpreterToolResourceOptions, + UpdateFileSearchToolResourceOptions, + AssistantDeletionStatus, + AssistantThreadCreationOptions, + ThreadMessageOptions, + MessageRole, + MessageAttachment, + AssistantThread, + UpdateAssistantThreadOptions, + ThreadDeletionStatus, + ThreadMessage, + MessageStatus, + MessageIncompleteDetails, + MessageIncompleteDetailsReason, + MessageContent, + MessageTextContent, + MessageTextDetails, + MessageTextAnnotation, + MessageTextFileCitationAnnotation, + MessageTextFileCitationDetails, + MessageTextFilePathAnnotation, + MessageTextFilePathDetails, + MessageImageFileContent, + MessageImageFileDetails, + OpenAIPageableListOfThreadMessage, + CreateRunOptions, + TruncationObject, + TruncationStrategy, + AssistantsApiToolChoiceOptionMode, + AssistantsNamedToolChoice, + AssistantsNamedToolChoiceType, FunctionName, - AzureChatExtensionConfiguration, - 
AzureSearchChatExtensionConfiguration, - OnYourDataAuthenticationOptions, - OnYourDataApiKeyAuthenticationOptions, - OnYourDataConnectionStringAuthenticationOptions, - OnYourDataKeyAndKeyIdAuthenticationOptions, - OnYourDataEncodedApiKeyAuthenticationOptions, - OnYourDataAccessTokenAuthenticationOptions, - OnYourDataSystemAssignedManagedIdentityAuthenticationOptions, - OnYourDataUserAssignedManagedIdentityAuthenticationOptions, - OnYourDataAuthenticationType, - AzureChatExtensionConfigurationUnion, - AzureSearchIndexFieldMappingOptions, - AzureSearchQueryType, - OnYourDataVectorizationSource, - OnYourDataEndpointVectorizationSource, - OnYourDataDeploymentNameVectorizationSource, - OnYourDataModelIdVectorizationSource, - OnYourDataVectorizationSourceType, - AzureMachineLearningIndexChatExtensionConfiguration, - AzureCosmosDBChatExtensionConfiguration, - AzureCosmosDBFieldMappingOptions, - ElasticsearchChatExtensionConfiguration, - ElasticsearchIndexFieldMappingOptions, - ElasticsearchQueryType, - PineconeChatExtensionConfiguration, - PineconeFieldMappingOptions, - AzureChatExtensionType, - AzureChatEnhancementConfiguration, - AzureChatGroundingEnhancementConfiguration, - AzureChatOCREnhancementConfiguration, - ChatCompletionsResponseFormat, - ChatCompletionsTextResponseFormat, - ChatCompletionsJsonResponseFormat, - ChatCompletionsToolDefinition, - ChatCompletionsFunctionToolDefinition, - ChatCompletionsToolSelectionPreset, - ChatCompletionsNamedToolSelection, - ChatCompletionsNamedFunctionToolSelection, - ChatCompletionsFunctionToolSelection, - ChatCompletions, - ChatChoice, - ChatResponseMessage, - AzureChatExtensionsMessageContext, - AzureChatExtensionDataSourceResponseCitation, - ChatChoiceLogProbabilityInfo, - ChatTokenLogProbabilityResult, - ChatTokenLogProbabilityInfo, - ChatFinishDetails, - StopFinishDetails, - MaxTokensFinishDetails, - AzureChatEnhancements, - AzureGroundingEnhancement, - AzureGroundingEnhancementLine, - AzureGroundingEnhancementLineSpan, - AzureGroundingEnhancementCoordinatePoint, - ImageGenerationOptions, - ImageSize, - ImageGenerationResponseFormat, - ImageGenerationQuality, - ImageGenerationStyle, - ImageGenerations, - ImageGenerationData, - ImageGenerationContentFilterResults, - ImageGenerationPromptFilterResults, - EmbeddingsOptions, - Embeddings, - EmbeddingItem, - EmbeddingsUsage, - EventStream, - ChatRequestMessageUnion, - ChatMessageContentItemUnion, - ChatCompletionsToolCallUnion, - OnYourDataAuthenticationOptionsUnion, - OnYourDataVectorizationSourceUnion, - ChatCompletionsResponseFormatUnion, - ChatCompletionsToolDefinitionUnion, - ChatCompletionsNamedToolSelectionUnion, - ChatFinishDetailsUnion, - OpenAIError, + ThreadRun, + RunStatus, + RequiredAction, + SubmitToolOutputsAction, + SubmitToolOutputsDetails, + RequiredToolCall, + RequiredFunctionToolCall, + RequiredFunctionToolCallDetails, + RunError, + IncompleteRunDetails, + RunCompletionUsage, + OpenAIPageableListOfThreadRun, + ToolOutput, + CreateAndRunThreadOptions, + RunStep, + RunStepType, + RunStepStatus, + RunStepDetails, + RunStepMessageCreationDetails, + RunStepMessageCreationReference, + RunStepToolCallDetails, + RunStepToolCall, + RunStepCodeInterpreterToolCall, + RunStepCodeInterpreterToolCallDetails, + RunStepCodeInterpreterToolCallOutput, + RunStepCodeInterpreterLogOutput, + RunStepCodeInterpreterImageOutput, + RunStepCodeInterpreterImageReference, + RunStepFileSearchToolCall, + RunStepFunctionToolCall, + RunStepFunctionToolCallDetails, + RunStepError, + RunStepErrorCode, + 
RunStepCompletionUsage, + OpenAIPageableListOfRunStep, + FilePurpose, + FileListResponse, + OpenAIFile, + FileState, + FileDeletionStatus, + OpenAIPageableListOfVectorStore, + VectorStore, + VectorStoreFileCount, + VectorStoreStatus, + VectorStoreExpirationPolicy, + VectorStoreExpirationPolicyAnchor, + VectorStoreOptions, + VectorStoreUpdateOptions, + VectorStoreDeletionStatus, + VectorStoreFileStatusFilter, + OpenAIPageableListOfVectorStoreFile, + VectorStoreFile, + VectorStoreFileStatus, + VectorStoreFileError, + VectorStoreFileErrorCode, + VectorStoreFileDeletionStatus, + VectorStoreFileBatch, + VectorStoreFileBatchStatus, + MessageDeltaChunk, + MessageDelta, + MessageDeltaContent, + MessageDeltaImageFileContent, + MessageDeltaImageFileContentObject, + MessageDeltaTextContentObject, + MessageDeltaTextContent, + MessageDeltaTextAnnotation, + MessageDeltaTextFileCitationAnnotationObject, + MessageDeltaTextFileCitationAnnotation, + MessageDeltaTextFilePathAnnotationObject, + MessageDeltaTextFilePathAnnotation, + RunStepDeltaChunk, + RunStepDelta, + RunStepDeltaDetail, + RunStepDeltaMessageCreation, + RunStepDeltaMessageCreationObject, + RunStepDeltaToolCallObject, + RunStepDeltaToolCall, + RunStepDeltaFunctionToolCall, + RunStepDeltaFunction, + RunStepDeltaFileSearchToolCall, + RunStepDeltaCodeInterpreterToolCall, + RunStepDeltaCodeInterpreterDetailItemObject, + RunStepDeltaCodeInterpreterOutput, + RunStepDeltaCodeInterpreterLogOutput, + RunStepDeltaCodeInterpreterImageOutput, + RunStepDeltaCodeInterpreterImageOutputObject, + ThreadStreamEvent, + RunStreamEvent, + RunStepStreamEvent, + MessageStreamEvent, + ErrorEvent, + DoneEvent, + ServiceApiVersions, + ToolDefinitionUnion, + CreateFileSearchToolResourceOptions, + MessageAttachmentToolDefinition, + MessageContentUnion, + MessageTextAnnotationUnion, + RequiredActionUnion, + RequiredToolCallUnion, + RunStepDetailsUnion, + RunStepToolCallUnion, + RunStepCodeInterpreterToolCallOutputUnion, + MessageDeltaContentUnion, + MessageDeltaTextAnnotationUnion, + RunStepDeltaDetailUnion, + RunStepDeltaToolCallUnion, + RunStepDeltaCodeInterpreterOutputUnion, } from "./models.js"; export { - GetCompletionsOptions, - GetImagesOptions, - GetChatCompletionsOptions, - GetEmbeddingsOptions, + CreateAssistantOptionalParams, + ListAssistantsOptionalParams, + GetAssistantOptionalParams, + UpdateAssistantOptionalParams, + DeleteAssistantOptionalParams, + CreateThreadOptionalParams, + GetThreadOptionalParams, + UpdateThreadOptionalParams, + DeleteThreadOptionalParams, + CreateMessageOptionalParams, + ListMessagesOptionalParams, + GetMessageOptionalParams, + UpdateMessageOptionalParams, + CreateRunOptionalParams, + ListRunsOptionalParams, + GetRunOptionalParams, + UpdateRunOptionalParams, + SubmitToolOutputsToRunOptionalParams, + CancelRunOptionalParams, + CreateThreadAndRunOptionalParams, + GetRunStepOptionalParams, + ListRunStepsOptionalParams, + ListFilesOptionalParams, + UploadFileOptionalParams, + DeleteFileOptionalParams, + GetFileOptionalParams, + GetFileContentOptionalParams, + ListVectorStoresOptionalParams, + CreateVectorStoreOptionalParams, + GetVectorStoreOptionalParams, + ModifyVectorStoreOptionalParams, + DeleteVectorStoreOptionalParams, + ListVectorStoreFilesOptionalParams, + CreateVectorStoreFileOptionalParams, + GetVectorStoreFileOptionalParams, + DeleteVectorStoreFileOptionalParams, + CreateVectorStoreFileBatchOptionalParams, + GetVectorStoreFileBatchOptionalParams, + CancelVectorStoreFileBatchOptionalParams, + 
ListVectorStoreFileBatchFilesOptionalParams, } from "./options.js"; diff --git a/sdk/openai/openai/src/models/models.ts b/sdk/openai/openai/src/models/models.ts index 6e275b82afea..1fa04851a261 100644 --- a/sdk/openai/openai/src/models/models.ts +++ b/sdk/openai/openai/src/models/models.ts @@ -1,1817 +1,2212 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -import { ErrorModel } from "@azure-rest/core-client"; -import { RestError, RestErrorOptions } from "@azure/core-rest-pipeline"; - -/** The configuration information for an audio transcription request. */ -export interface AudioTranscriptionOptions { - /** - * The audio data to transcribe. This must be the binary content of a file in one of the supported media formats: - * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm. - */ - file: Uint8Array; - /** The optional filename or descriptive identifier to associate with with the audio data. */ - filename?: string; - /** The requested format of the transcription response data, which will influence the content and detail of the result. */ - responseFormat?: AudioTranscriptionFormat; +import { serializeRecord } from "../helpers/serializerHelpers.js"; +import { + AssistantCreationOptions as AssistantCreationOptionsRest, + ToolDefinition as ToolDefinitionRest, + CodeInterpreterToolDefinition as CodeInterpreterToolDefinitionRest, + FileSearchToolDefinition as FileSearchToolDefinitionRest, + FunctionToolDefinition as FunctionToolDefinitionRest, + FunctionDefinition as FunctionDefinitionRest, + CreateToolResourcesOptions as CreateToolResourcesOptionsRest, + CreateCodeInterpreterToolResourceOptions as CreateCodeInterpreterToolResourceOptionsRest, + CreateFileSearchToolResourceVectorStoreOptions as CreateFileSearchToolResourceVectorStoreOptionsRest, + AssistantsApiResponseFormat as AssistantsApiResponseFormatRest, + UpdateAssistantOptions as UpdateAssistantOptionsRest, + UpdateToolResourcesOptions as UpdateToolResourcesOptionsRest, + UpdateCodeInterpreterToolResourceOptions as UpdateCodeInterpreterToolResourceOptionsRest, + UpdateFileSearchToolResourceOptions as UpdateFileSearchToolResourceOptionsRest, + AssistantThreadCreationOptions as AssistantThreadCreationOptionsRest, + ThreadMessageOptions as ThreadMessageOptionsRest, + MessageAttachment as MessageAttachmentRest, + UpdateAssistantThreadOptions as UpdateAssistantThreadOptionsRest, + ThreadMessage as ThreadMessageRest, + MessageIncompleteDetails as MessageIncompleteDetailsRest, + MessageContent as MessageContentRest, + MessageTextContent as MessageTextContentRest, + MessageTextDetails as MessageTextDetailsRest, + MessageTextAnnotation as MessageTextAnnotationRest, + MessageTextFileCitationAnnotation as MessageTextFileCitationAnnotationRest, + MessageTextFileCitationDetails as MessageTextFileCitationDetailsRest, + MessageTextFilePathAnnotation as MessageTextFilePathAnnotationRest, + MessageTextFilePathDetails as MessageTextFilePathDetailsRest, + MessageImageFileContent as MessageImageFileContentRest, + MessageImageFileDetails as MessageImageFileDetailsRest, + CreateRunOptions as CreateRunOptionsRest, + TruncationObject as TruncationObjectRest, + AssistantsNamedToolChoice as AssistantsNamedToolChoiceRest, + FunctionName as FunctionNameRest, + ToolOutput as ToolOutputRest, + CreateAndRunThreadOptions as CreateAndRunThreadOptionsRest, + VectorStoreExpirationPolicy as VectorStoreExpirationPolicyRest, + VectorStoreOptions as VectorStoreOptionsRest, + VectorStoreUpdateOptions as VectorStoreUpdateOptionsRest, +} from 
"../rest/index.js"; + +/** The request details to use when creating a new assistant. */ +export interface AssistantCreationOptions { + /** The ID of the model to use. */ + model: string; + /** The name of the new assistant. */ + name?: string | null; + /** The description of the new assistant. */ + description?: string | null; + /** The system instructions for the new assistant to use. */ + instructions?: string | null; + /** The collection of tools to enable for the new assistant. */ + tools?: ToolDefinitionUnion[]; /** - * The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code - * such as 'en' or 'fr'. - * Providing this known input language is optional but may improve the accuracy and/or latency of transcription. + * A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` + * tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. */ - language?: string; + toolResources?: CreateToolResourcesOptions | null; /** - * An optional hint to guide the model's style or continue from a prior audio segment. The written language of the - * prompt should match the primary spoken language of the audio data. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + * while lower values like 0.2 will make it more focused and deterministic. */ - prompt?: string; + temperature?: number | null; /** - * The sampling temperature, between 0 and 1. - * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - * If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. - */ - temperature?: number; - /** The model to use for this transcription request. */ - model?: string; + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + * So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + topP?: number | null; + /** The response format of the tool calls used by this assistant. */ + responseFormat?: + | string + | AssistantsApiResponseFormatMode + | AssistantsApiResponseFormat; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record; +} + +export function assistantCreationOptionsSerializer( + item: AssistantCreationOptions, +): AssistantCreationOptionsRest { + return { + model: item["model"], + name: item["name"], + description: item["description"], + instructions: item["instructions"], + tools: item["tools"], + tool_resources: !item.toolResources + ? item.toolResources + : createToolResourcesOptionsSerializer(item.toolResources), + temperature: item["temperature"], + top_p: item["topP"], + response_format: item["responseFormat"], + metadata: !item.metadata + ? item.metadata + : (serializeRecord(item.metadata as any) as any), + }; +} + +/** An abstract representation of an input tool definition that an assistant can use. 
*/ +export interface ToolDefinition { + /** the discriminator possible values: code_interpreter, file_search, function */ + type: string; } -/** Defines available options for the underlying response format of output transcription information. */ -/** "json", "verbose_json", "text", "srt", "vtt" */ -export type AudioTranscriptionFormat = string; +export function toolDefinitionUnionSerializer(item: ToolDefinitionUnion) { + switch (item.type) { + case "code_interpreter": + return codeInterpreterToolDefinitionSerializer( + item as CodeInterpreterToolDefinition, + ); -/** Result information for an operation that transcribed spoken audio into written text. */ -export interface AudioTranscription { - /** The transcribed text for the provided audio data. */ - text: string; - /** The label that describes which operation type generated the accompanying response data. */ - task?: AudioTaskLabel; - /** - * The spoken language that was detected in the transcribed audio data. - * This is expressed as a two-letter ISO-639-1 language code like 'en' or 'fr'. - */ - language?: string; - /** The total duration of the audio processed to produce accompanying transcription information. */ - duration?: number; - /** A collection of information about the timing, probabilities, and other detail of each processed audio segment. */ - segments?: AudioTranscriptionSegment[]; + case "file_search": + return fileSearchToolDefinitionSerializer( + item as FileSearchToolDefinition, + ); + + case "function": + return functionToolDefinitionSerializer(item as FunctionToolDefinition); + + default: + return toolDefinitionSerializer(item); + } } -/** Defines the possible descriptors for available audio operation responses. */ -/** "transcribe", "translate" */ -export type AudioTaskLabel = string; +export function toolDefinitionSerializer( + item: ToolDefinitionUnion, +): ToolDefinitionRest { + return { + type: item["type"], + }; +} -/** - * Extended information about a single segment of transcribed audio data. - * Segments generally represent roughly 5-10 seconds of speech. Segment boundaries typically occur between words but not - * necessarily sentences. - */ -export interface AudioTranscriptionSegment { - /** The 0-based index of this segment within a transcription. */ - id: number; - /** The time at which this segment started relative to the beginning of the transcribed audio. */ - start: number; - /** The time at which this segment ended relative to the beginning of the transcribed audio. */ - end: number; - /** The transcribed text that was part of this audio segment. */ - text: string; - /** The temperature score associated with this audio segment. */ - temperature: number; - /** The average log probability associated with this audio segment. */ - avgLogprob: number; - /** The compression ratio of this audio segment. */ - compressionRatio: number; - /** The probability of no speech detection within this audio segment. */ - noSpeechProb: number; - /** The token IDs matching the transcribed text in this audio segment. */ - tokens: number[]; - /** - * The seek position associated with the processing of this audio segment. - * Seek positions are expressed as hundredths of seconds. - * The model may process several segments from a single seek position, so while the seek position will never represent - * a later time than the segment's start, the segment's start may represent a significantly later time than the - * segment's associated seek position. 
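// Illustrative sketch, not part of this change: the union serializer above
// dispatches on the `type` discriminator. This exercises the "function" arm with
// a hypothetical get_weather tool; unrecognized discriminators fall through to
// the base toolDefinitionSerializer.
import {
  type FunctionToolDefinition,
  toolDefinitionUnionSerializer,
} from "./models.js";

const weatherTool: FunctionToolDefinition = {
  type: "function",
  function: {
    name: "get_weather", // hypothetical function name
    description: "Look up the current weather for a city.",
    parameters: {
      type: "object",
      properties: { city: { type: "string" } },
      required: ["city"],
    },
  },
};

// Dispatches to functionToolDefinitionSerializer because type === "function".
const weatherToolWire = toolDefinitionUnionSerializer(weatherTool);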
- */ - seek: number; +/** The input definition information for a code interpreter tool as used to configure an assistant. */ +export interface CodeInterpreterToolDefinition extends ToolDefinition { + /** The object type, which is always 'code_interpreter'. */ + type: "code_interpreter"; } -/** The configuration information for an audio translation request. */ -export interface AudioTranslationOptions { - /** - * The audio data to translate. This must be the binary content of a file in one of the supported media formats: - * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm. - */ - file: Uint8Array; - /** The optional filename or descriptive identifier to associate with with the audio data. */ - filename?: string; - /** The requested format of the translation response data, which will influence the content and detail of the result. */ - responseFormat?: AudioTranslationFormat; - /** - * An optional hint to guide the model's style or continue from a prior audio segment. The written language of the - * prompt should match the primary spoken language of the audio data. - */ - prompt?: string; - /** - * The sampling temperature, between 0 and 1. - * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - * If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. - */ - temperature?: number; - /** The model to use for this translation request. */ - model?: string; +export function codeInterpreterToolDefinitionSerializer( + item: CodeInterpreterToolDefinition, +): CodeInterpreterToolDefinitionRest { + return { + type: item["type"], + }; } -/** Defines available options for the underlying response format of output translation information. */ -/** "json", "verbose_json", "text", "srt", "vtt" */ -export type AudioTranslationFormat = string; +/** The input definition information for a file search tool as used to configure an assistant. */ +export interface FileSearchToolDefinition extends ToolDefinition { + /** The object type, which is always 'file_search'. */ + type: "file_search"; +} -/** Result information for an operation that translated spoken audio into written text. */ -export interface AudioTranslation { - /** The translated text for the provided audio data. */ - text: string; - /** The label that describes which operation type generated the accompanying response data. */ - task?: AudioTaskLabel; - /** - * The spoken language that was detected in the translated audio data. - * This is expressed as a two-letter ISO-639-1 language code like 'en' or 'fr'. - */ - language?: string; - /** The total duration of the audio processed to produce accompanying translation information. */ - duration?: number; - /** A collection of information about the timing, probabilities, and other detail of each processed audio segment. */ - segments?: AudioTranslationSegment[]; +export function fileSearchToolDefinitionSerializer( + item: FileSearchToolDefinition, +): FileSearchToolDefinitionRest { + return { + type: item["type"], + }; } -/** - * Options for Azure OpenAI chat extensions. - */ -export interface AzureExtensionsOptions { - /** - * The configuration entries for Azure OpenAI chat extensions that use them. - * This additional specification is only compatible with Azure OpenAI. - */ - extensions?: AzureChatExtensionConfigurationUnion[]; - /** If provided, the configuration options for available Azure OpenAI chat enhancements. 
*/ - enhancements?: AzureChatEnhancementConfiguration; + +/** The input definition information for a function tool as used to configure an assistant. */ +export interface FunctionToolDefinition extends ToolDefinition { + /** The object type, which is always 'function'. */ + type: "function"; + /** The definition of the concrete function that the function tool should call. */ + function: FunctionDefinition; } -/** - * Extended information about a single segment of translated audio data. - * Segments generally represent roughly 5-10 seconds of speech. Segment boundaries typically occur between words but not - * necessarily sentences. - */ -export interface AudioTranslationSegment { - /** The 0-based index of this segment within a translation. */ - id: number; - /** The time at which this segment started relative to the beginning of the translated audio. */ - start: number; - /** The time at which this segment ended relative to the beginning of the translated audio. */ - end: number; - /** The translated text that was part of this audio segment. */ - text: string; - /** The temperature score associated with this audio segment. */ - temperature: number; - /** The average log probability associated with this audio segment. */ - avgLogprob: number; - /** The compression ratio of this audio segment. */ - compressionRatio: number; - /** The probability of no speech detection within this audio segment. */ - noSpeechProb: number; - /** The token IDs matching the translated text in this audio segment. */ - tokens: number[]; - /** - * The seek position associated with the processing of this audio segment. - * Seek positions are expressed as hundredths of seconds. - * The model may process several segments from a single seek position, so while the seek position will never represent - * a later time than the segment's start, the segment's start may represent a significantly later time than the - * segment's associated seek position. - */ - seek: number; +export function functionToolDefinitionSerializer( + item: FunctionToolDefinition, +): FunctionToolDefinitionRest { + return { + type: item["type"], + function: functionDefinitionSerializer(item.function), + }; } -/** - * The configuration information for a completions request. - * Completions support a wide variety of tasks and generate text that continues from or "completes" - * provided prompt data. - */ -export interface CompletionsOptions { - /** The prompts to generate completions from. */ - prompt: string[]; - /** The maximum number of tokens to generate. */ - maxTokens?: number; - /** - * The sampling temperature to use that controls the apparent creativity of generated completions. - * Higher values will make output more random while lower values will make results more focused - * and deterministic. - * It is not recommended to modify temperature and topP for the same completions request as the - * interaction of these two settings is difficult to predict. - */ - temperature?: number; - /** - * An alternative to sampling with temperature called nucleus sampling. This value causes the - * model to consider the results of tokens with the provided probability mass. As an example, a - * value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be - * considered. - * It is not recommended to modify temperature and topP for the same completions request as the - * interaction of these two settings is difficult to predict. 
- */ - topP?: number; - /** - * A map between GPT token IDs and bias scores that influences the probability of specific tokens - * appearing in a completions response. Token IDs are computed via external tokenizer tools, while - * bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to - * a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias - * score varies by model. - */ - logitBias?: Record; - /** - * An identifier for the caller or end user of the operation. This may be used for tracking - * or rate-limiting purposes. - */ - user?: string; - /** - * The number of completions choices that should be generated per provided prompt as part of an - * overall completions response. - * Because this setting can generate many completions, it may quickly consume your token quota. - * Use carefully and ensure reasonable settings for maxTokens and stop. - */ - n?: number; - /** - * A value that controls the emission of log probabilities for the provided number of most likely - * tokens within a completions response. - */ - logprobs?: number; - /** The suffix that comes after a completion of inserted text */ - suffix?: string; - /** - * A value specifying whether completions responses should include input prompts as prefixes to - * their generated output. - */ - echo?: boolean; - /** A collection of textual sequences that will end completions generation. */ - stop?: string[]; - /** - * A value that influences the probability of generated tokens appearing based on their existing - * presence in generated text. - * Positive values will make tokens less likely to appear when they already exist and increase the - * model's likelihood to output new topics. - */ - presencePenalty?: number; - /** - * A value that influences the probability of generated tokens appearing based on their cumulative - * frequency in generated text. - * Positive values will make tokens less likely to appear as their frequency increases and - * decrease the likelihood of the model repeating the same statements verbatim. - */ - frequencyPenalty?: number; - /** - * A value that controls how many completions will be internally generated prior to response - * formulation. - * When used together with n, bestOf controls the number of candidate completions and must be - * greater than n. - * Because this setting can generate many completions, it may quickly consume your token quota. - * Use carefully and ensure reasonable settings for max_tokens and stop. - */ - bestOf?: number; - /** A value indicating whether chat completions should be streamed for this request. */ - stream?: boolean; - /** - * The model name to provide as part of this completions request. - * Not applicable to Azure OpenAI, where deployment information should be included in the Azure - * resource URI that's connected to. - */ - model?: string; +/** The input definition information for a function. */ +export interface FunctionDefinition { + /** The name of the function to be called. */ + name: string; + /** A description of what the function does, used by the model to choose when and how to call the function. */ + description?: string; + /** The parameters the functions accepts, described as a JSON Schema object. 
*/ + parameters: any; +} + +export function functionDefinitionSerializer( + item: FunctionDefinition, +): FunctionDefinitionRest { + return { + name: item["name"], + description: item["description"], + parameters: item["parameters"], + }; } /** - * Representation of the response data from a completions request. - * Completions support a wide variety of tasks and generate text that continues from or "completes" - * provided prompt data. + * Request object. A set of resources that are used by the assistant's tools. The resources are specific to the + * type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. */ -export interface Completions { - /** A unique identifier associated with this completions response. */ - id: string; - /** - * The first timestamp associated with generation activity for this completions response, - * represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. - */ - created: Date; - /** - * Content filtering results for zero or more prompts in the request. In a streaming request, - * results for different prompts may arrive at different times or in different orders. - */ - promptFilterResults?: ContentFilterResultsForPrompt[]; +export interface CreateToolResourcesOptions { /** - * The collection of completions choices associated with this completions response. - * Generally, `n` choices are generated per provided prompt with a default value of 1. - * Token limits and other settings may limit the number of choices generated. + * A list of file IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. */ - choices: Choice[]; - /** Usage information for tokens processed and generated as part of this completions operation. */ - usage: CompletionsUsage; + codeInterpreter?: CreateCodeInterpreterToolResourceOptions; + /** A list of vector stores or their IDs made available to the `file_search` tool. */ + fileSearch?: CreateFileSearchToolResourceOptions; } -/** Content filtering results for a single prompt in the request. */ -export interface ContentFilterResultsForPrompt { - /** The index of this prompt in the set of prompt results */ - promptIndex: number; - /** Content filtering results for this prompt */ - contentFilterResults: ContentFilterResultDetailsForPrompt; +export function createToolResourcesOptionsSerializer( + item: CreateToolResourcesOptions, +) { + return { + code_interpreter: !item.codeInterpreter + ? item.codeInterpreter + : createCodeInterpreterToolResourceOptionsSerializer( + item.codeInterpreter, + ), + file_search: item["fileSearch"] as any, + }; } -/** Information about the content filtering category, if it has been detected. */ -export type ContentFilterResultDetailsForPrompt = - | ContentFilterSuccessResultDetailsForPrompt - | ContentFilterErrorResults; -/** Information about the content filtering success result. */ -export interface ContentFilterSuccessResultDetailsForPrompt { - /** - * Describes language related to anatomical organs and genitals, romantic relationships, - * acts portrayed in erotic or affectionate terms, physical sexual acts, including - * those portrayed as an assault or a forced sexual violent act against one’s will, - * prostitution, pornography, and abuse. - */ - sexual?: ContentFilterResult; - /** - * Describes language related to physical actions intended to hurt, injure, damage, or - * kill someone or something; describes weapons, etc. 
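// Illustrative sketch, not part of this change: building the
// CreateToolResourcesOptions shape defined above. The file ID is hypothetical.
// On the wire, codeInterpreter/fileIds become code_interpreter/file_ids.
import {
  type CreateToolResourcesOptions,
  createToolResourcesOptionsSerializer,
} from "./models.js";

const toolResources: CreateToolResourcesOptions = {
  codeInterpreter: { fileIds: ["assistant-file-123"] }, // max 20 files per tool
};

const toolResourcesWire = createToolResourcesOptionsSerializer(toolResources);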
- */ - violence?: ContentFilterResult; - /** - * Describes language attacks or uses that include pejorative or discriminatory language - * with reference to a person or identity group on the basis of certain differentiating - * attributes of these groups including but not limited to race, ethnicity, nationality, - * gender identity and expression, sexual orientation, religion, immigration status, ability - * status, personal appearance, and body size. - */ - hate?: ContentFilterResult; - /** - * Describes language related to physical actions intended to purposely hurt, injure, - * or damage one’s body, or kill oneself. - */ - selfHarm?: ContentFilterResult; - /** - * Describes an error returned if the content filtering system is - * down or otherwise unable to complete the operation in time. - */ - error?: undefined; - /** Describes whether profanity was detected. */ - profanity?: ContentFilterDetectionResult; - /** Describes detection results against configured custom blocklists. */ - customBlocklists?: ContentFilterBlocklistIdResult[]; - /** Whether a jailbreak attempt was detected in the prompt. */ - jailbreak?: ContentFilterDetectionResult; +/** A set of resources that will be used by the `code_interpreter` tool. Request object. */ +export interface CreateCodeInterpreterToolResourceOptions { + /** A list of file IDs made available to the `code_interpreter` tool. */ + fileIds?: string[]; } -/** Information about the content filtering error result. */ -export interface ContentFilterErrorResults { - /** - * Describes an error returned if the content filtering system is - * down or otherwise unable to complete the operation in time. - */ - error: ErrorModel; +export function createCodeInterpreterToolResourceOptionsSerializer( + item: CreateCodeInterpreterToolResourceOptions, +): CreateCodeInterpreterToolResourceOptionsRest { + return { + file_ids: item["fileIds"], + }; } -/** Information about filtered content severity level and if it has been filtered or not. */ -export interface ContentFilterResult { - /** Ratings for the intensity and risk level of filtered content. */ - severity: ContentFilterSeverity; - /** A value indicating whether or not the content has been filtered. */ - filtered: boolean; +/** File IDs associated to the vector store to be passed to the helper. */ +export interface CreateFileSearchToolResourceVectorStoreOptions { + /** A list of file IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. */ + fileIds: string[]; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record; } -/** Ratings for the intensity and risk level of harmful content. */ -/** "safe", "low", "medium", "high" */ -export type ContentFilterSeverity = string; - -/** Represents the outcome of a detection operation performed by content filtering. */ -export interface ContentFilterDetectionResult { - /** A value indicating whether or not the content has been filtered. */ - filtered: boolean; - /** A value indicating whether detection occurred, irrespective of severity or whether the content was filtered. 
*/ - detected: boolean; +export function createFileSearchToolResourceVectorStoreOptionsSerializer( + item: CreateFileSearchToolResourceVectorStoreOptions, +): CreateFileSearchToolResourceVectorStoreOptionsRest { + return { + file_ids: item["fileIds"], + metadata: !item.metadata + ? item.metadata + : (serializeRecord(item.metadata as any) as any), + }; } -/** Represents the outcome of an evaluation against a custom blocklist as performed by content filtering. */ -export interface ContentFilterBlocklistIdResult { - /** The ID of the custom blocklist evaluated. */ - id: string; - /** A value indicating whether or not the content has been filtered. */ - filtered: boolean; -} +/** Represents the mode in which the model will handle the return format of a tool call. */ +export type AssistantsApiResponseFormatMode = "auto" | "none"; /** - * The representation of a single prompt completion as part of an overall completions request. - * Generally, `n` choices are generated per provided prompt with a default value of 1. - * Token limits and other settings may limit the number of choices generated. + * An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. + * If `text` the model can return text or any value needed. */ -export interface Choice { - /** The generated text for a given completions prompt. */ - text: string; - /** The ordered index associated with this completions choice. */ - index: number; - /** - * Information about the content filtering category (hate, sexual, violence, self_harm), if it - * has been detected, as well as the severity level (very_low, low, medium, high-scale that - * determines the intensity and risk level of harmful content) and if it has been filtered or not. - */ - contentFilterResults?: ContentFilterResultsForChoice; - /** The log probabilities model for tokens associated with this completions choice. */ - logprobs: CompletionsLogProbabilityModel | null; - /** Reason for finishing */ - finishReason: CompletionsFinishReason | null; +export interface AssistantsApiResponseFormat { + /** Must be one of `text` or `json_object`. */ + type?: ApiResponseFormat; } -/** Information about the content filtering results, if it has been detected. */ -export type ContentFilterResultsForChoice = - | ContentFilterSuccessResultsForChoice - | ContentFilterErrorResults; +export function assistantsApiResponseFormatSerializer( + item: AssistantsApiResponseFormat, +): AssistantsApiResponseFormatRest { + return { + type: item["type"], + }; +} -/** Information about content filtering evaluated against generated model output. */ -export interface ContentFilterSuccessResultsForChoice { - /** - * Describes language related to anatomical organs and genitals, romantic relationships, - * acts portrayed in erotic or affectionate terms, physical sexual acts, including - * those portrayed as an assault or a forced sexual violent act against one’s will, - * prostitution, pornography, and abuse. - */ - sexual?: ContentFilterResult; - /** - * Describes language related to physical actions intended to hurt, injure, damage, or - * kill someone or something; describes weapons, etc. - */ - violence?: ContentFilterResult; +/** Possible API response formats. */ +export type ApiResponseFormat = "text" | "json_object"; + +/** Represents an assistant that can call the model and use tools. */ +export interface Assistant { + /** The identifier, which can be referenced in API endpoints. 
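// Illustrative sketch, not part of this change: the responseFormat union above
// admits three shapes, shown here as type-level examples only. Which shape a
// given service API version accepts is not established by this diff.
import { type AssistantsApiResponseFormat, type AssistantsApiResponseFormatMode } from "./models.js";

const formatAsMode: AssistantsApiResponseFormatMode = "auto";
const formatAsObject: AssistantsApiResponseFormat = { type: "json_object" };
const formatAsString: string = "text";
// Per the doc comment above: with json_object, only "function"-type tools may be
// passed to the run.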
*/ + id: string; + /** The object type, which is always assistant. */ + object: "assistant"; + /** The Unix timestamp, in seconds, representing when this object was created. */ + createdAt: Date; + /** The name of the assistant. */ + name: string | null; + /** The description of the assistant. */ + description: string | null; + /** The ID of the model to use. */ + model: string; + /** The system instructions for the assistant to use. */ + instructions: string | null; + /** The collection of tools enabled for the assistant. */ + tools: ToolDefinitionUnion[]; /** - * Describes language attacks or uses that include pejorative or discriminatory language - * with reference to a person or identity group on the basis of certain differentiating - * attributes of these groups including but not limited to race, ethnicity, nationality, - * gender identity and expression, sexual orientation, religion, immigration status, ability - * status, personal appearance, and body size. + * A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` + * tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. */ - hate?: ContentFilterResult; + toolResources: ToolResources | null; /** - * Describes language related to physical actions intended to purposely hurt, injure, - * or damage one’s body, or kill oneself. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + * while lower values like 0.2 will make it more focused and deterministic. */ - selfHarm?: ContentFilterResult; - /** Describes whether profanity was detected. */ - profanity?: ContentFilterDetectionResult; - /** Describes detection results against configured custom blocklists. */ - customBlocklists?: ContentFilterBlocklistIdResult[]; + temperature: number | null; /** - * Describes an error returned if the content filtering system is - * down or otherwise unable to complete the operation in time. + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + * So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. */ - error?: undefined; - /** Information about detection of protected text material. */ - protectedMaterialText?: ContentFilterDetectionResult; - /** Information about detection of protected code material. */ - protectedMaterialCode?: ContentFilterCitedDetectionResult; -} - -/** Represents the outcome of a detection operation against protected resources as performed by content filtering. */ -export interface ContentFilterCitedDetectionResult { - /** A value indicating whether or not the content has been filtered. */ - filtered: boolean; - /** A value indicating whether detection occurred, irrespective of severity or whether the content was filtered. */ - detected: boolean; - /** The internet location associated with the detection. */ - url?: string; - /** The license description associated with the detection. */ - license: string; -} - -/** Representation of a log probabilities model for a completions generation. */ -export interface CompletionsLogProbabilityModel { - /** The textual forms of tokens evaluated in this probability model. */ - tokens: string[]; - /** A collection of log probability values for the tokens in this completions data. 
*/ - tokenLogprobs: (number | null)[]; - /** A mapping of tokens to maximum log probability values in this completions data. */ - topLogprobs: Record[]; - /** The text offsets associated with tokens in this completions data. */ - textOffset: number[]; -} - -/** Representation of a log probabilities model for a completions generation. */ -export interface CompletionsLogProbabilityModel { - /** The textual forms of tokens evaluated in this probability model. */ - tokens: string[]; - /** A collection of log probability values for the tokens in this completions data. */ - tokenLogprobs: (number | null)[]; - /** A mapping of tokens to maximum log probability values in this completions data. */ - topLogprobs: Record[]; - /** The text offsets associated with tokens in this completions data. */ - textOffset: number[]; -} - -/** Representation of the manner in which a completions response concluded. */ -/** "stop", "length", "content_filter", "function_call", "tool_calls" */ -export type CompletionsFinishReason = string; + topP: number | null; + /** The response format of the tool calls used by this assistant. */ + responseFormat?: + | string + | AssistantsApiResponseFormatMode + | AssistantsApiResponseFormat; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata: Record; +} /** - * Representation of the token counts processed for a completions request. - * Counts consider all tokens across prompts, choices, choice alternates, best_of generations, and - * other consumers. + * A set of resources that are used by the assistant's tools. The resources are specific to the type of + * tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. */ -export interface CompletionsUsage { - /** The number of tokens generated across all completions emissions. */ - completionTokens: number; - /** The number of tokens in the provided prompts for the completions request. */ - promptTokens: number; - /** The total number of tokens processed for the completions request and response. */ - totalTokens: number; +export interface ToolResources { + /** Resources to be used by the `code_interpreter tool` consisting of file IDs. */ + codeInterpreter?: CodeInterpreterToolResource; + /** Resources to be used by the `file_search` tool consisting of vector store IDs. */ + fileSearch?: FileSearchToolResource; } -/** - * The configuration information for a chat completions request. - * Completions support a wide variety of tasks and generate text that continues from or "completes" - * provided prompt data. - */ -export interface ChatCompletionsOptions { - /** - * The collection of context messages associated with this chat completions request. - * Typical usage begins with a chat message for the System role that provides instructions for - * the behavior of the assistant, followed by alternating messages between the User and - * Assistant roles. - */ - messages: ChatRequestMessageUnion[]; - /** A list of functions the model may generate JSON inputs for. */ - functions?: FunctionDefinition[]; +/** A set of resources that are used by the `code_interpreter` tool. */ +export interface CodeInterpreterToolResource { /** - * Controls how the model responds to function calls. 
"none" means the model does not call a function, - * and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. - * Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. - * "none" is the default when no functions are present. "auto" is the default if functions are present. + * A list of file IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. */ - functionCall?: FunctionCallPreset | FunctionName; - /** The maximum number of tokens to generate. */ - maxTokens?: number; - /** - * The sampling temperature to use that controls the apparent creativity of generated completions. - * Higher values will make output more random while lower values will make results more focused - * and deterministic. - * It is not recommended to modify temperature and top_p for the same completions request as the - * interaction of these two settings is difficult to predict. - */ - temperature?: number; - /** - * An alternative to sampling with temperature called nucleus sampling. This value causes the - * model to consider the results of tokens with the provided probability mass. As an example, a - * value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be - * considered. - * It is not recommended to modify temperature and top_p for the same completions request as the - * interaction of these two settings is difficult to predict. - */ - topP?: number; - /** - * A map between GPT token IDs and bias scores that influences the probability of specific tokens - * appearing in a completions response. Token IDs are computed via external tokenizer tools, while - * bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to - * a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias - * score varies by model. - */ - logitBias?: Record; - /** - * An identifier for the caller or end user of the operation. This may be used for tracking - * or rate-limiting purposes. - */ - user?: string; - /** - * The number of chat completions choices that should be generated for a chat completions - * response. - * Because this setting can generate many completions, it may quickly consume your token quota. - * Use carefully and ensure reasonable settings for max_tokens and stop. - */ - n?: number; - /** A collection of textual sequences that will end completions generation. */ - stop?: string[]; + fileIds: string[]; +} + +/** A set of resources that are used by the `file_search` tool. */ +export interface FileSearchToolResource { /** - * A value that influences the probability of generated tokens appearing based on their existing - * presence in generated text. - * Positive values will make tokens less likely to appear when they already exist and increase the - * model's likelihood to output new topics. + * The ID of the vector store attached to this assistant. There can be a maximum of 1 vector + * store attached to the assistant. */ - presencePenalty?: number; + vectorStoreIds?: string[]; +} + +/** The available sorting options when requesting a list of response objects. */ +export type ListSortOrder = "asc" | "desc"; + +/** The response data for a requested list of items. */ +export interface OpenAIPageableListOfAssistant { + /** The object type, which is always list. */ + object: "list"; + /** The requested list of items. 
*/ + data: Assistant[]; + /** The first ID represented in this list. */ + firstId: string; + /** The last ID represented in this list. */ + lastId: string; + /** A value indicating whether there are additional values available not captured in this list. */ + hasMore: boolean; +} + +/** The request details to use when modifying an existing assistant. */ +export interface UpdateAssistantOptions { + /** The ID of the model to use. */ + model?: string; + /** The modified name for the assistant to use. */ + name?: string | null; + /** The modified description for the assistant to use. */ + description?: string | null; + /** The modified system instructions for the new assistant to use. */ + instructions?: string | null; + /** The modified collection of tools to enable for the assistant. */ + tools?: ToolDefinitionUnion[]; /** - * A value that influences the probability of generated tokens appearing based on their cumulative - * frequency in generated text. - * Positive values will make tokens less likely to appear as their frequency increases and - * decrease the likelihood of the model repeating the same statements verbatim. + * A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, + * the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. */ - frequencyPenalty?: number; - /** A value indicating whether chat completions should be streamed for this request. */ - stream?: boolean; + toolResources?: UpdateToolResourcesOptions; /** - * The model name to provide as part of this completions request. - * Not applicable to Azure OpenAI, where deployment information should be included in the Azure - * resource URI that's connected to. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + * while lower values like 0.2 will make it more focused and deterministic. */ - model?: string; + temperature?: number | null; /** - * The configuration entries for Azure OpenAI chat extensions that use them. - * This additional specification is only compatible with Azure OpenAI. - */ - dataSources?: AzureChatExtensionConfiguration[]; - /** If provided, the configuration options for available Azure OpenAI chat enhancements. */ - enhancements?: AzureChatEnhancementConfiguration; + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + * So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + topP?: number | null; + /** The response format of the tool calls used by this assistant. */ + responseFormat?: + | string + | AssistantsApiResponseFormatMode + | AssistantsApiResponseFormat; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record; +} + +export function updateAssistantOptionsSerializer( + item: UpdateAssistantOptions, +): UpdateAssistantOptionsRest { + return { + model: item["model"], + name: item["name"], + description: item["description"], + instructions: item["instructions"], + tools: item["tools"], + tool_resources: !item.toolResources + ? 
item.toolResources + : updateToolResourcesOptionsSerializer(item.toolResources), + temperature: item["temperature"], + top_p: item["topP"], + response_format: item["responseFormat"], + metadata: !item.metadata + ? item.metadata + : (serializeRecord(item.metadata as any) as any), + }; +} + +/** + * Request object. A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. + * For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of + * vector store IDs. + */ +export interface UpdateToolResourcesOptions { /** - * If specified, the system will make a best effort to sample deterministically such that repeated requests with the - * same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the - * system_fingerprint response parameter to monitor changes in the backend." + * Overrides the list of file IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. */ - seed?: number; - /** Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. */ - logprobs?: boolean | null; - /** An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. */ - topLogprobs?: number | null; - /** An object specifying the format that the model must output. Used to enable JSON mode. */ - responseFormat?: ChatCompletionsResponseFormatUnion; - /** The available tool definitions that the chat completions request can use, including caller-defined functions. */ - tools?: ChatCompletionsToolDefinitionUnion[]; - /** If specified, the model will configure which of the provided tools it can use for the chat completions response. */ - toolChoice?: ChatCompletionsToolSelectionPreset | ChatCompletionsNamedToolSelectionUnion; + codeInterpreter?: UpdateCodeInterpreterToolResourceOptions; + /** Overrides the vector store attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. */ + fileSearch?: UpdateFileSearchToolResourceOptions; } -/** An abstract representation of a chat message as provided in a request. */ -export interface ChatRequestMessage { - /** the discriminator possible values: system, user, assistant, tool, function */ - role: ChatRole; +export function updateToolResourcesOptionsSerializer( + item: UpdateToolResourcesOptions, +): UpdateToolResourcesOptionsRest { + return { + code_interpreter: !item.codeInterpreter + ? item.codeInterpreter + : updateCodeInterpreterToolResourceOptionsSerializer( + item.codeInterpreter, + ), + file_search: !item.fileSearch + ? item.fileSearch + : updateFileSearchToolResourceOptionsSerializer(item.fileSearch), + }; } -/** - * A request chat message containing system instructions that influence how the model will generate a chat completions - * response. - */ -export interface ChatRequestSystemMessage extends ChatRequestMessage { - /** The chat role associated with this message, which is always 'system' for system messages. */ - role: "system"; - /** The contents of the system message. */ - content: string; - /** An optional name for the participant. 
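// Illustrative sketch, not part of this change: cursor-style paging over the
// OpenAIPageableListOfAssistant shape completed above, driven by hasMore/lastId.
// The listPage callback stands in for a real list operation (for example, one
// built on ListAssistantsOptionalParams); its cursor parameter is an assumption.
import { type Assistant, type OpenAIPageableListOfAssistant } from "./models.js";

async function collectAllAssistants(
  listPage: (after?: string) => Promise<OpenAIPageableListOfAssistant>,
): Promise<Assistant[]> {
  const assistants: Assistant[] = [];
  let page = await listPage();
  assistants.push(...page.data);
  while (page.hasMore) {
    // lastId is the cursor for the next page.
    page = await listPage(page.lastId);
    assistants.push(...page.data);
  }
  return assistants;
}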
*/ - name?: string; +/** Request object to update `code_interpreter` tool resources. */ +export interface UpdateCodeInterpreterToolResourceOptions { + /** A list of file IDs to override the current list of the assistant. */ + fileIds?: string[]; } -/** A request chat message representing user input to the assistant. */ -export interface ChatRequestUserMessage extends ChatRequestMessage { - /** The chat role associated with this message, which is always 'user' for user messages. */ - role: "user"; - /** The contents of the user message, with available input types varying by selected model. */ - content: string | ChatMessageContentItemUnion[]; - /** An optional name for the participant. */ - name?: string; +export function updateCodeInterpreterToolResourceOptionsSerializer( + item: UpdateCodeInterpreterToolResourceOptions, +): UpdateCodeInterpreterToolResourceOptionsRest { + return { + file_ids: item["fileIds"], + }; } -/** An abstract representation of a structured content item within a chat message. */ -export interface ChatMessageContentItem { - /** the discriminator possible values: text, image_url */ - type: string; +/** Request object to update `file_search` tool resources. */ +export interface UpdateFileSearchToolResourceOptions { + /** A list of vector store IDs to override the current list of the assistant. */ + vectorStoreIds?: string[]; } -/** A structured chat content item containing plain text. */ -export interface ChatMessageTextContentItem extends ChatMessageContentItem { - /** The discriminated object type: always 'text' for this type. */ - type: "text"; - /** The content of the message. */ - text: string; +export function updateFileSearchToolResourceOptionsSerializer( + item: UpdateFileSearchToolResourceOptions, +): UpdateFileSearchToolResourceOptionsRest { + return { + vector_store_ids: item["vectorStoreIds"], + }; } -/** A structured chat content item containing an image reference. */ -export interface ChatMessageImageContentItem extends ChatMessageContentItem { - /** The discriminated object type: always 'image_url' for this type. */ - type: "image_url"; - /** An internet location, which must be accessible to the model,from which the image may be retrieved. */ - imageUrl: ChatMessageImageUrl; +/** The status of an assistant deletion operation. */ +export interface AssistantDeletionStatus { + /** The ID of the resource specified for deletion. */ + id: string; + /** A value indicating whether deletion was successful. */ + deleted: boolean; + /** The object type, which is always 'assistant.deleted'. */ + object: "assistant.deleted"; } -/** An internet location from which the model may retrieve an image. */ -export interface ChatMessageImageUrl { - /** The URL of the image. */ - url: string; +/** The details used to create a new assistant thread. */ +export interface AssistantThreadCreationOptions { + /** The initial messages to associate with the new thread. */ + messages?: ThreadMessageOptions[]; /** - * The evaluation quality setting to use, which controls relative prioritization of speed, token consumption, and - * accuracy. + * A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the + * type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires + * a list of vector store IDs. 
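// Illustrative sketch, not part of this change: the update-resource shapes above
// override (rather than merge with) the assistant's current lists. IDs are
// hypothetical.
import {
  type UpdateToolResourcesOptions,
  updateToolResourcesOptionsSerializer,
} from "./models.js";

const resourceUpdate: UpdateToolResourcesOptions = {
  codeInterpreter: { fileIds: ["assistant-file-456"] }, // replaces the current file list
  fileSearch: { vectorStoreIds: ["vs-123"] }, // at most 1 vector store per assistant
};

const resourceUpdateWire = updateToolResourcesOptionsSerializer(resourceUpdate);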
*/ - detail?: ChatMessageImageDetailLevel; + toolResources?: CreateToolResourcesOptions | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record; } -/** A representation of the possible image detail levels for image-based chat completions message content. */ -/** "auto", "low", "high" */ -export type ChatMessageImageDetailLevel = string; +export function assistantThreadCreationOptionsSerializer( + item: AssistantThreadCreationOptions, +): AssistantThreadCreationOptionsRest { + return { + messages: + item["messages"] === undefined + ? item["messages"] + : item["messages"].map(threadMessageOptionsSerializer), + tool_resources: !item.toolResources + ? item.toolResources + : createToolResourcesOptionsSerializer(item.toolResources), + metadata: !item.metadata + ? item.metadata + : (serializeRecord(item.metadata as any) as any), + }; +} -/** A request chat message representing response or action from the assistant. */ -export interface ChatRequestAssistantMessage extends ChatRequestMessage { - /** The chat role associated with this message, which is always 'assistant' for assistant messages. */ - role: "assistant"; - /** The content of the message. */ - content: string | null; - /** An optional name for the participant. */ - name?: string; +/** A single message within an assistant thread, as provided during that thread's creation for its initial state. */ +export interface ThreadMessageOptions { /** - * The tool calls that must be resolved and have their outputs appended to subsequent input messages for the chat - * completions request to resolve as configured. + * The role of the entity that is creating the message. Allowed values include: + * - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. + * - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into + * the conversation. */ - toolCalls?: ChatCompletionsToolCallUnion[]; + role: MessageRole; /** - * The function call that must be resolved and have its output appended to subsequent input messages for the chat - * completions request to resolve as configured. + * The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via + * a separate call to the create message API. */ - functionCall?: FunctionCall; -} - -/** - * An abstract representation of a tool call that must be resolved in a subsequent request to perform the requested - * chat completion. - */ -export interface ChatCompletionsToolCall { - /** the discriminator possible values: function */ - type: string; - /** The ID of the tool call. */ + content: string; + /** A list of files attached to the message, and the tools they should be added to. */ + attachments?: MessageAttachment[] | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. 
*/ + metadata?: Record; +} + +export function threadMessageOptionsSerializer( + item: ThreadMessageOptions, +): ThreadMessageOptionsRest { + return { + role: item["role"], + content: item["content"], + attachments: + item["attachments"] === undefined || item["attachments"] === null + ? item["attachments"] + : item["attachments"].map(messageAttachmentSerializer), + metadata: !item.metadata + ? item.metadata + : (serializeRecord(item.metadata as any) as any), + }; +} + +/** The possible values for roles attributed to messages in a thread. */ +export type MessageRole = "user" | "assistant"; + +/** This describes to which tools a file has been attached. */ +export interface MessageAttachment { + /** The ID of the file to attach to the message. */ + fileId: string; + /** The tools to add to this file. */ + tools: MessageAttachmentToolDefinition[]; +} + +export function messageAttachmentSerializer( + item: MessageAttachment, +): MessageAttachmentRest { + return { + file_id: item["fileId"], + tools: item["tools"], + }; +} + +/** Information about a single thread associated with an assistant. */ +export interface AssistantThread { + /** The identifier, which can be referenced in API endpoints. */ id: string; - /** The index of the tool call. */ - index?: number; -} - -/** - * A tool call to a function tool, issued by the model in evaluation of a configured function tool, that represents - * a function invocation needed for a subsequent chat completions request to resolve. - */ -export interface ChatCompletionsFunctionToolCall extends ChatCompletionsToolCall { - /** The type of tool call, in this case always 'function'. */ - type: "function"; - /** The details of the function invocation requested by the tool call. */ - function: FunctionCall; + /** The object type, which is always 'thread'. */ + object: "thread"; + /** The Unix timestamp, in seconds, representing when this object was created. */ + createdAt: Date; + /** + * A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type + * of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list + * of vector store IDs. + */ + toolResources: ToolResources | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata: Record; } -/** The name and arguments of a function that should be called, as generated by the model. */ -export interface FunctionCall { - /** The name of the function to call. */ - name: string; +/** The details used to update an existing assistant thread */ +export interface UpdateAssistantThreadOptions { /** - * The arguments to call the function with, as generated by the model in JSON format. - * Note that the model does not always generate valid JSON, and may hallucinate parameters - * not defined by your function schema. Validate the arguments in your code before calling - * your function. + * A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the + * type of tool. 
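// Illustrative sketch, not part of this change: seeding a new thread with one
// user message whose attached file is routed to the file_search tool, per the
// ThreadMessageOptions and MessageAttachment shapes above. The file ID is
// hypothetical, and MessageAttachmentToolDefinition is assumed here to admit
// the file_search tool definition.
import {
  type AssistantThreadCreationOptions,
  assistantThreadCreationOptionsSerializer,
} from "./models.js";

const threadOptions: AssistantThreadCreationOptions = {
  messages: [
    {
      role: "user",
      content: "Summarize the attached report.",
      attachments: [
        { fileId: "assistant-file-report", tools: [{ type: "file_search" }] },
      ],
    },
  ],
};

const threadWire = assistantThreadCreationOptionsSerializer(threadOptions);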
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires + * a list of vector store IDs */ - arguments: string; + toolResources?: UpdateToolResourcesOptions | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record; } -/** A request chat message representing requested output from a configured tool. */ -export interface ChatRequestToolMessage extends ChatRequestMessage { - /** The chat role associated with this message, which is always 'tool' for tool messages. */ - role: "tool"; - /** The content of the message. */ - content: string | null; - /** The ID of the tool call resolved by the provided content. */ - toolCallId: string; +export function updateAssistantThreadOptionsSerializer( + item: UpdateAssistantThreadOptions, +): UpdateAssistantThreadOptionsRest { + return { + tool_resources: !item.toolResources + ? item.toolResources + : updateToolResourcesOptionsSerializer(item.toolResources), + metadata: !item.metadata + ? item.metadata + : (serializeRecord(item.metadata as any) as any), + }; } -/** A request chat message representing requested output from a configured function. */ -export interface ChatRequestFunctionMessage extends ChatRequestMessage { - /** The chat role associated with this message, which is always 'function' for function messages. */ - role: "function"; - /** The name of the function that was called to produce output. */ - name: string; - /** The output of the function as requested by the function call. */ - content: string | null; +/** The status of a thread deletion operation. */ +export interface ThreadDeletionStatus { + /** The ID of the resource specified for deletion. */ + id: string; + /** A value indicating whether deletion was successful. */ + deleted: boolean; + /** The object type, which is always 'thread.deleted'. */ + object: "thread.deleted"; } -/** A description of the intended purpose of a message within a chat completions interaction. */ -/** "system", "assistant", "user", "function", "tool" */ -export type ChatRole = string; - -/** The definition of a caller-specified function that chat completions may invoke in response to matching user input. */ -export interface FunctionDefinition { - /** The name of the function to be called. */ - name: string; - /** - * A description of what the function does. The model will use this description when selecting the function and - * interpreting its parameters. - */ - description?: string; - /** The parameters the function accepts, described as a JSON Schema object. */ - parameters?: Record; +/** A single, existing message within an assistant thread. */ +export interface ThreadMessage { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + /** The object type, which is always 'thread.message'. */ + object: "thread.message"; + /** The Unix timestamp, in seconds, representing when this object was created. */ + createdAt: Date; + /** The ID of the thread that this message belongs to. */ + threadId: string; + /** The status of the message. */ + status: MessageStatus; + /** On an incomplete message, details about why the message is incomplete. */ + incompleteDetails: MessageIncompleteDetails | null; + /** The Unix timestamp (in seconds) for when the message was completed. 
*/ + completedAt: Date | null; + /** The Unix timestamp (in seconds) for when the message was marked as incomplete. */ + incompleteAt: Date | null; + /** The role associated with the assistant thread message. */ + role: MessageRole; + /** The list of content items associated with the assistant thread message. */ + content: MessageContentUnion[]; + /** If applicable, the ID of the assistant that authored this message. */ + assistantId: string | null; + /** If applicable, the ID of the run associated with the authoring of this message. */ + runId: string | null; + /** A list of files attached to the message, and the tools they were added to. */ + attachments: MessageAttachment[] | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata: Record; +} + +export function threadMessageSerializer( + item: ThreadMessage, +): ThreadMessageRest { + return { + id: item["id"], + object: item["object"], + created_at: item["createdAt"].getTime(), + thread_id: item["threadId"], + status: item["status"], + incomplete_details: !item.incompleteDetails + ? item.incompleteDetails + : messageIncompleteDetailsSerializer(item.incompleteDetails), + completed_at: item["completedAt"].getTime(), + incomplete_at: item["incompleteAt"].getTime(), + role: item["role"], + content: item["content"].map((p) => messageContentUnionSerializer(p)), + assistant_id: item["assistantId"], + run_id: item["runId"], + attachments: + item["attachments"] === null + ? item["attachments"] + : item["attachments"].map(messageAttachmentSerializer), + metadata: !item.metadata + ? item.metadata + : (serializeRecord(item.metadata as any) as any), + }; +} + +/** The possible execution status values for a thread message. */ +export type MessageStatus = "in_progress" | "incomplete" | "completed"; + +/** Information providing additional detail about a message entering an incomplete status. */ +export interface MessageIncompleteDetails { + /** The provided reason describing why the message was marked as incomplete. */ + reason: MessageIncompleteDetailsReason; +} + +export function messageIncompleteDetailsSerializer( + item: MessageIncompleteDetails, +) { + return { + reason: item["reason"], + }; +} + +/** A set of reasons describing why a message is marked as incomplete. */ +export type MessageIncompleteDetailsReason = + | "content_filter" + | "max_tokens" + | "run_cancelled" + | "run_failed" + | "run_expired"; + +/** An abstract representation of a single item of thread message content. */ +export interface MessageContent { + /** the discriminator possible values: text, image_file */ + type: string; } -/** - * The collection of predefined behaviors for handling request-provided function information in a chat completions - * operation. - */ -/** "auto", "none" */ -export type FunctionCallPreset = string; +export function messageContentUnionSerializer(item: MessageContentUnion) { + switch (item.type) { + case "text": + return messageTextContentSerializer(item as MessageTextContent); -/** - * A structure that specifies the exact name of a specific, request-provided function to use when processing a chat - * completions operation. - */ -export interface FunctionName { - /** The name of the function to call. 
*/ - name: string; -} + case "image_file": + return messageImageFileContentSerializer(item as MessageImageFileContent); -/** - * A representation of configuration data for a single Azure OpenAI chat extension. This will be used by a chat - * completions request that should use Azure OpenAI chat extensions to augment the response behavior. - * The use of this configuration is compatible only with Azure OpenAI. - */ -export interface AzureChatExtensionConfiguration { - /** the discriminator possible values: azure_search, azure_ml_index, azure_cosmos_db, elasticsearch, pinecone */ - type: AzureChatExtensionType; + default: + return messageContentSerializer(item); + } } -/** - * A specific representation of configurable options for Azure Search when using it as an Azure OpenAI chat - * extension. - */ -export interface AzureSearchChatExtensionConfiguration extends AzureChatExtensionConfiguration { - /** - * The type label to use when configuring Azure OpenAI chat extensions. This should typically not be changed from its - * default value for Azure Cognitive Search. - */ - type: "azure_search"; - /** - * The authentication method to use when accessing the defined data source. - * Each data source type supports a specific set of available authentication methods; please see the documentation of - * the data source for supported mechanisms. - * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) - * authentication. - */ - authentication?: OnYourDataAuthenticationOptionsUnion; - /** The configured top number of documents to feature for the configured query. */ - topNDocuments?: number; - /** Whether queries should be restricted to use of indexed data. */ - inScope?: boolean; - /** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */ - strictness?: number; - /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */ - roleInformation?: string; - /** The absolute endpoint path for the Azure Cognitive Search resource to use. */ - endpoint: string; - /** The name of the index to use as available in the referenced Azure Cognitive Search resource. */ - indexName: string; - /** Customized field mapping behavior to use when interacting with the search index. */ - fieldsMapping?: AzureSearchIndexFieldMappingOptions; - /** The query type to use with Azure Cognitive Search. */ - queryType?: AzureSearchQueryType; - /** The additional semantic configuration for the query. */ - semanticConfiguration?: string; - /** Search filter. */ - filter?: string; - /** The embedding dependency for vector search. */ - embeddingDependency?: OnYourDataVectorizationSourceUnion; -} - -/** The authentication options for Azure OpenAI On Your Data. */ -export interface OnYourDataAuthenticationOptions { - /** the discriminator possible values: api_key, connection_string, key_and_key_id, encoded_api_key, access_token, system_assigned_managed_identity, user_assigned_managed_identity */ - type: OnYourDataAuthenticationType; -} - -/** The authentication options for Azure OpenAI On Your Data when using an API key. */ -export interface OnYourDataApiKeyAuthenticationOptions extends OnYourDataAuthenticationOptions { - /** The authentication type of API key. 
*/ - type: "api_key"; - /** The API key to use for authentication. */ - key: string; -} - -/** The authentication options for Azure OpenAI On Your Data when using a connection string. */ -export interface OnYourDataConnectionStringAuthenticationOptions - extends OnYourDataAuthenticationOptions { - /** The authentication type of connection string. */ - type: "connection_string"; - /** The connection string to use for authentication. */ - connectionString: string; -} - -/** The authentication options for Azure OpenAI On Your Data when using an Elasticsearch key and key ID pair. */ -export interface OnYourDataKeyAndKeyIdAuthenticationOptions - extends OnYourDataAuthenticationOptions { - /** The authentication type of Elasticsearch key and key ID pair. */ - type: "key_and_key_id"; - /** The key to use for authentication. */ - key: string; - /** The key ID to use for authentication. */ - keyId: string; -} - -/** The authentication options for Azure OpenAI On Your Data when using an Elasticsearch encoded API key. */ -export interface OnYourDataEncodedApiKeyAuthenticationOptions - extends OnYourDataAuthenticationOptions { - /** The authentication type of Elasticsearch encoded API Key. */ - type: "encoded_api_key"; - /** The encoded API key to use for authentication. */ - encodedApiKey: string; -} - -/** The authentication options for Azure OpenAI On Your Data when using access token. */ -export interface OnYourDataAccessTokenAuthenticationOptions - extends OnYourDataAuthenticationOptions { - /** The authentication type of access token. */ - type: "access_token"; - /** The access token to use for authentication. */ - accessToken: string; -} - -/** The authentication options for Azure OpenAI On Your Data when using a system-assigned managed identity. */ -export interface OnYourDataSystemAssignedManagedIdentityAuthenticationOptions - extends OnYourDataAuthenticationOptions { - /** The authentication type of system-assigned managed identity. */ - type: "system_assigned_managed_identity"; -} - -/** The authentication options for Azure OpenAI On Your Data when using a user-assigned managed identity. */ -export interface OnYourDataUserAssignedManagedIdentityAuthenticationOptions - extends OnYourDataAuthenticationOptions { - /** The authentication type of user-assigned managed identity. */ - type: "user_assigned_managed_identity"; - /** The resource ID of the user-assigned managed identity to use for authentication. */ - managedIdentityResourceId: string; -} - -/** The authentication types supported with Azure OpenAI On Your Data. */ -/** "api_key", "connection_string", "key_and_key_id", "encoded_api_key", "access_token", "system_assigned_managed_identity", "user_assigned_managed_identity" */ -export type OnYourDataAuthenticationType = string; - -/** Optional settings to control how fields are processed when using a configured Azure Search resource. */ -export interface AzureSearchIndexFieldMappingOptions { - /** The name of the index field to use as a title. */ - titleField?: string; - /** The name of the index field to use as a URL. */ - urlField?: string; - /** The name of the index field to use as a filepath. */ - filepathField?: string; - /** The names of index fields that should be treated as content. */ - contentFields?: string[]; - /** The separator pattern that content fields should use. */ - contentFieldsSeparator?: string; - /** The names of fields that represent vector data. */ - vectorFields?: string[]; - /** The names of fields that represent image vector data. 
*/ - imageVectorFields?: string[]; -} - -/** The type of Azure Search retrieval query that should be executed when using it as an Azure OpenAI chat extension. */ -/** "simple", "semantic", "vector", "vector_simple_hybrid", "vector_semantic_hybrid" */ -export type AzureSearchQueryType = string; - -/** An abstract representation of a vectorization source for Azure OpenAI On Your Data with vector search. */ -export interface OnYourDataVectorizationSource { - /** the discriminator possible values: endpoint, deployment_name, model_id */ - type: OnYourDataVectorizationSourceType; +export function messageContentSerializer( + item: MessageContentUnion, +): MessageContentRest { + return { + ...messageContentUnionSerializer(item), + }; } -/** - * The details of a a vectorization source, used by Azure OpenAI On Your Data when applying vector search, that is based - * on a public Azure OpenAI endpoint call for embeddings. - */ -export interface OnYourDataEndpointVectorizationSource extends OnYourDataVectorizationSource { - /** The type of vectorization source to use. Always 'Endpoint' for this type. */ - type: "endpoint"; - /** Specifies the resource endpoint URL from which embeddings should be retrieved. It should be in the format of https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/embeddings. The api-version query parameter is not allowed. */ - endpoint: string; - /** Specifies the authentication options to use when retrieving embeddings from the specified endpoint. */ - authentication: OnYourDataAuthenticationOptionsUnion; +/** A representation of a textual item of thread message content. */ +export interface MessageTextContent extends MessageContent { + /** The object type, which is always 'text'. */ + type: "text"; + /** The text and associated annotations for this thread message content item. */ + text: MessageTextDetails; } -/** - * The details of a a vectorization source, used by Azure OpenAI On Your Data when applying vector search, that is based - * on an internal embeddings model deployment name in the same Azure OpenAI resource. - */ -export interface OnYourDataDeploymentNameVectorizationSource extends OnYourDataVectorizationSource { - /** The type of vectorization source to use. Always 'DeploymentName' for this type. */ - type: "deployment_name"; - /** The embedding model deployment name within the same Azure OpenAI resource. This enables you to use vector search without Azure OpenAI api-key and without Azure OpenAI public network access. */ - deploymentName: string; +export function messageTextContentSerializer( + item: MessageTextContent, +): MessageTextContentRest { + return { + type: item["type"], + text: messageTextDetailsSerializer(item.text), + }; } -/** - * The details of a a vectorization source, used by Azure OpenAI On Your Data when applying vector search, that is based - * on a search service model ID. Currently only supported by Elasticsearch®. - */ -export interface OnYourDataModelIdVectorizationSource extends OnYourDataVectorizationSource { - /** The type of vectorization source to use. Always 'ModelId' for this type. */ - type: "model_id"; - /** The embedding model ID build inside the search service. Currently only supported by Elasticsearch®. */ - modelId: string; +/** The text and associated annotations for a single item of assistant thread message content. */ +export interface MessageTextDetails { + /** The text data. */ + value: string; + /** A list of annotations associated with this text. 
*/ + annotations: MessageTextAnnotationUnion[]; } -/** - * Represents the available sources Azure OpenAI On Your Data can use to configure vectorization of data for use with - * vector search. - */ -/** "endpoint", "deployment_name", "model_id" */ -export type OnYourDataVectorizationSourceType = string; +export function messageTextDetailsSerializer( + item: MessageTextDetails, +): MessageTextDetailsRest { + return { + value: item["value"], + annotations: item["annotations"].map((p) => + messageTextAnnotationUnionSerializer(p), + ), + }; +} -/** - * A specific representation of configurable options for Azure Machine Learning vector index when using it as an Azure - * OpenAI chat extension. - */ -export interface AzureMachineLearningIndexChatExtensionConfiguration { - /** - * The type label to use when configuring Azure OpenAI chat extensions. This should typically not be changed from its - * default value for Azure Machine Learning vector index. - */ - type: "azure_ml_index"; - /** - * The authentication method to use when accessing the defined data source. - * Each data source type supports a specific set of available authentication methods; please see the documentation of - * the data source for supported mechanisms. - * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) - * authentication. - */ - authentication?: OnYourDataAuthenticationOptionsUnion; - /** The configured top number of documents to feature for the configured query. */ - topNDocuments?: number; - /** Whether queries should be restricted to use of indexed data. */ - inScope?: boolean; - /** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */ - strictness?: number; - /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */ - roleInformation?: string; - /** The resource ID of the Azure Machine Learning project. */ - projectResourceId: string; - /** The Azure Machine Learning vector index name. */ - name: string; - /** The version of the Azure Machine Learning vector index. */ - version: string; - /** Search filter. Only supported if the Azure Machine Learning vector index is of type AzureSearch. */ - filter?: string; +/** An abstract representation of an annotation to text thread message content. */ +export interface MessageTextAnnotation { + /** the discriminator possible values: file_citation, file_path */ + type: string; + /** The textual content associated with this text annotation item. */ + text: string; } -/** - * A specific representation of configurable options for Azure Cosmos DB when using it as an Azure OpenAI chat - * extension. - */ -export interface AzureCosmosDBChatExtensionConfiguration { - /** - * The type label to use when configuring Azure OpenAI chat extensions. This should typically not be changed from its - * default value for Azure Cosmos DB. - */ - type: "azure_cosmos_db"; - /** - * The authentication method to use when accessing the defined data source. - * Each data source type supports a specific set of available authentication methods; please see the documentation of - * the data source for supported mechanisms. 
- * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) - * authentication. - */ - authentication?: OnYourDataAuthenticationOptionsUnion; - /** The configured top number of documents to feature for the configured query. */ - topNDocuments?: number; - /** Whether queries should be restricted to use of indexed data. */ - inScope?: boolean; - /** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */ - strictness?: number; - /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */ - roleInformation?: string; - /** The MongoDB vCore database name to use with Azure Cosmos DB. */ - databaseName: string; - /** The name of the Azure Cosmos DB resource container. */ - containerName: string; - /** The MongoDB vCore index name to use with Azure Cosmos DB. */ - indexName: string; - /** Customized field mapping behavior to use when interacting with the search index. */ - fieldsMapping: AzureCosmosDBFieldMappingOptions; - /** The embedding dependency for vector search. */ - embeddingDependency: OnYourDataVectorizationSourceUnion; -} -/** Optional settings to control how fields are processed when using a configured Azure Cosmos DB resource. */ -export interface AzureCosmosDBFieldMappingOptions { - /** The name of the index field to use as a title. */ - titleField?: string; - /** The name of the index field to use as a URL. */ - urlField?: string; - /** The name of the index field to use as a filepath. */ - filepathField?: string; - /** The names of index fields that should be treated as content. */ - contentFields: string[]; - /** The separator pattern that content fields should use. */ - contentFieldsSeparator?: string; - /** The names of fields that represent vector data. */ - vectorFields: string[]; +export function messageTextAnnotationUnionSerializer( + item: MessageTextAnnotationUnion, +) { + switch (item.type) { + case "file_citation": + return messageTextFileCitationAnnotationSerializer( + item as MessageTextFileCitationAnnotation, + ); + + case "file_path": + return messageTextFilePathAnnotationSerializer( + item as MessageTextFilePathAnnotation, + ); + + default: + return messageTextAnnotationSerializer(item); + } } -/** - * A specific representation of configurable options for Elasticsearch when using it as an Azure OpenAI chat - * extension. - */ -export interface ElasticsearchChatExtensionConfiguration { - /** - * The type label to use when configuring Azure OpenAI chat extensions. This should typically not be changed from its - * default value for Elasticsearch®. +export function messageTextAnnotationSerializer( + item: MessageTextAnnotationUnion, +): MessageTextAnnotationRest { + return { + ...messageTextAnnotationUnionSerializer(item), + }; +} + +/** A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the 'file_search' tool to search files. */ +export interface MessageTextFileCitationAnnotation + extends MessageTextAnnotation { + /** The object type, which is always 'file_citation'. */ + type: "file_citation"; + /** + * A citation within the message that points to a specific quote from a specific file. 
+ * Generated when the assistant uses the "file_search" tool to search files. + */ + fileCitation: MessageTextFileCitationDetails; + /** The first text index associated with this text annotation. */ + startIndex?: number; + /** The last text index associated with this text annotation. */ + endIndex?: number; +} + +export function messageTextFileCitationAnnotationSerializer( + item: MessageTextFileCitationAnnotation, +): MessageTextFileCitationAnnotationRest { + return { + type: item["type"], + text: item["text"], + file_citation: messageTextFileCitationDetailsSerializer(item.fileCitation), + start_index: item["startIndex"], + end_index: item["endIndex"], + }; +} + +/** A representation of a file-based text citation, as used in a file-based annotation of text thread message content. */ +export interface MessageTextFileCitationDetails { + /** The ID of the file associated with this citation. */ + fileId: string; + /** The specific quote cited in the associated file. */ + quote: string; +} + +export function messageTextFileCitationDetailsSerializer( + item: MessageTextFileCitationDetails, +): MessageTextFileCitationDetailsRest { + return { + file_id: item["fileId"], + quote: item["quote"], + }; +} + +/** A citation within the message that points to a file located at a specific path. */ +export interface MessageTextFilePathAnnotation extends MessageTextAnnotation { + /** The object type, which is always 'file_path'. */ + type: "file_path"; + /** A URL for the file that's generated when the assistant uses the code_interpreter tool to generate a file. */ + filePath: MessageTextFilePathDetails; + /** The first text index associated with this text annotation. */ + startIndex?: number; + /** The last text index associated with this text annotation. */ + endIndex?: number; +} + +export function messageTextFilePathAnnotationSerializer( + item: MessageTextFilePathAnnotation, +): MessageTextFilePathAnnotationRest { + return { + type: item["type"], + text: item["text"], + file_path: messageTextFilePathDetailsSerializer(item.filePath), + start_index: item["startIndex"], + end_index: item["endIndex"], + }; +} + +/** An encapsulation of a file ID, as used by a file path annotation of text thread message content. */ +export interface MessageTextFilePathDetails { + /** The ID of the specific file that the citation is from. */ + fileId: string; +} + +export function messageTextFilePathDetailsSerializer( + item: MessageTextFilePathDetails, +): MessageTextFilePathDetailsRest { + return { + file_id: item["fileId"], + }; +} + +/** A representation of image file content in a thread message. */ +export interface MessageImageFileContent extends MessageContent { + /** The object type, which is always 'image_file'. */ + type: "image_file"; + /** The image file for this thread message content item. */ + imageFile: MessageImageFileDetails; +} + +export function messageImageFileContentSerializer( + item: MessageImageFileContent, +): MessageImageFileContentRest { + return { + type: item["type"], + image_file: messageImageFileDetailsSerializer(item.imageFile), + }; +} + +/** An image reference, as represented in thread message content. */ +export interface MessageImageFileDetails { + /** The ID for the file associated with this image. */ + fileId: string; +} + +export function messageImageFileDetailsSerializer( + item: MessageImageFileDetails, +): MessageImageFileDetailsRest { + return { + file_id: item["fileId"], + }; +} + +/** The response data for a requested list of items.
*/ +export interface OpenAIPageableListOfThreadMessage { + /** The object type, which is always list. */ + object: "list"; + /** The requested list of items. */ + data: ThreadMessage[]; + /** The first ID represented in this list. */ + firstId: string; + /** The last ID represented in this list. */ + lastId: string; + /** A value indicating whether there are additional values available not captured in this list. */ + hasMore: boolean; +} + +/** The details used when creating a new run of an assistant thread. */ +export interface CreateRunOptions { + /** The ID of the assistant that should run the thread. */ + assistantId: string; + /** The overridden model name that the assistant should use to run the thread. */ + model?: string | null; + /** The overridden system instructions that the assistant should use to run the thread. */ + instructions?: string | null; + /** + * Additional instructions to append at the end of the instructions for the run. This is useful for modifying the behavior + * on a per-run basis without overriding other instructions. + */ + additionalInstructions?: string | null; + /** Adds additional messages to the thread before creating the run. */ + additionalMessages?: ThreadMessage[] | null; + /** The overridden list of enabled tools that the assistant should use to run the thread. */ + tools?: ToolDefinitionUnion[] | null; + /** + * If `true`, returns a stream of events that happen during the Run as server-sent events, + * terminating when the Run enters a terminal state with a `data: [DONE]` message. */ - type: "elasticsearch"; + stream?: boolean; /** - * The authentication method to use when accessing the defined data source. - * Each data source type supports a specific set of available authentication methods; please see the documentation of - * the data source for supported mechanisms. - * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) - * authentication. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. */ - authentication?: OnYourDataAuthenticationOptionsUnion; - /** The configured top number of documents to feature for the configured query. */ - topNDocuments?: number; - /** Whether queries should be restricted to use of indexed data. */ - inScope?: boolean; - /** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */ - strictness?: number; - /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */ - roleInformation?: string; - /** The endpoint of Elasticsearch®. */ - endpoint: string; - /** The index name of Elasticsearch®. */ - indexName: string; - /** The index field mapping options of Elasticsearch®. */ - fieldsMapping?: ElasticsearchIndexFieldMappingOptions; - /** The query type of Elasticsearch®. */ - queryType?: ElasticsearchQueryType; - /** The embedding dependency for vector search. */ - embeddingDependency?: OnYourDataVectorizationSourceUnion; -} - -/** Optional settings to control how fields are processed when using a configured Elasticsearch® resource. 
*/ -export interface ElasticsearchIndexFieldMappingOptions { - /** The name of the index field to use as a title. */ - titleField?: string; - /** The name of the index field to use as a URL. */ - urlField?: string; - /** The name of the index field to use as a filepath. */ - filepathField?: string; - /** The names of index fields that should be treated as content. */ - contentFields?: string[]; - /** The separator pattern that content fields should use. */ - contentFieldsSeparator?: string; - /** The names of fields that represent vector data. */ - vectorFields?: string[]; -} - -/** The type of Elasticsearch® retrieval query that should be executed when using it as an Azure OpenAI chat extension. */ -/** "simple", "vector" */ -export type ElasticsearchQueryType = string; + temperature?: number | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + * comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + topP?: number | null; + /** + * The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only + * the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. + */ + maxPromptTokens?: number | null; + /** + * The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort + * to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + */ + maxCompletionTokens?: number | null; + /** The strategy to use for dropping messages as the context window moves forward. */ + truncationStrategy?: TruncationObject | null; + /** Controls whether and which tool is called by the model. */ + toolChoice?: + | string + | AssistantsApiToolChoiceOptionMode + | AssistantsNamedToolChoice; + /** Specifies the format that the model must output. */ + responseFormat?: + | string + | AssistantsApiResponseFormatMode + | AssistantsApiResponseFormat; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record<string, string>; +} + +export function createRunOptionsSerializer( + item: CreateRunOptions, +): CreateRunOptionsRest { + return { + assistant_id: item["assistantId"], + model: item["model"], + instructions: item["instructions"], + additional_instructions: item["additionalInstructions"], + additional_messages: + item["additionalMessages"] === undefined || + item["additionalMessages"] === null + ? item["additionalMessages"] + : item["additionalMessages"].map(threadMessageSerializer), + tools: item["tools"], + stream: item["stream"], + temperature: item["temperature"], + top_p: item["topP"], + max_prompt_tokens: item["maxPromptTokens"], + max_completion_tokens: item["maxCompletionTokens"], + truncation_strategy: !item.truncationStrategy + ?
item.truncationStrategy + : truncationObjectSerializer(item.truncationStrategy), + tool_choice: item["toolChoice"], + response_format: item["responseFormat"], + metadata: !item.metadata + ? item.metadata + : (serializeRecord(item.metadata as any) as any), + }; +} /** - * A specific representation of configurable options for Pinecone when using it as an Azure OpenAI chat - * extension. + * Controls for how a thread will be truncated prior to the run. Use this to control the initial + * context window of the run. */ -export interface PineconeChatExtensionConfiguration { - /** - * The type label to use when configuring Azure OpenAI chat extensions. This should typically not be changed from its - * default value for Pinecone. - */ - type: "pinecone"; +export interface TruncationObject { /** - * The authentication method to use when accessing the defined data source. - * Each data source type supports a specific set of available authentication methods; please see the documentation of - * the data source for supported mechanisms. - * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) - * authentication. + * The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will + * be truncated to the `lastMessages` most recent messages in the thread. When set to `auto`, messages in the middle of the thread + * will be dropped to fit the context length of the model, `max_prompt_tokens`. */ - authentication?: OnYourDataAuthenticationOptions; - /** The configured top number of documents to feature for the configured query. */ - topNDocuments?: number; - /** Whether queries should be restricted to use of indexed data. */ - inScope?: boolean; - /** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */ - strictness?: number; - /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */ - roleInformation?: string; - /** The environment name of Pinecone. */ - environment: string; - /** The name of the Pinecone database index. */ - indexName: string; - /** Customized field mapping behavior to use when interacting with the search index. */ - fieldsMapping: PineconeFieldMappingOptions; - /** The embedding dependency for vector search. */ - embeddingDependency: OnYourDataVectorizationSourceUnion; -} - -/** Optional settings to control how fields are processed when using a configured Pinecone resource. */ -export interface PineconeFieldMappingOptions { - /** The name of the index field to use as a title. */ - titleField?: string; - /** The name of the index field to use as a URL. */ - urlField?: string; - /** The name of the index field to use as a filepath. */ - filepathField?: string; - /** The names of index fields that should be treated as content. */ - contentFields: string[]; - /** The separator pattern that content fields should use. */ - contentFieldsSeparator?: string; + type: TruncationStrategy; + /** The number of most recent messages from the thread when constructing the context for the run. */ + lastMessages?: number | null; } -/** - * A representation of configuration data for a single Azure OpenAI chat extension.
This will be used by a chat - * completions request that should use Azure OpenAI chat extensions to augment the response behavior. - * The use of this configuration is compatible only with Azure OpenAI. - */ -/** "azure_search", "azure_ml_index", "azure_cosmos_db", "elasticsearch", "pinecone" */ -export type AzureChatExtensionType = string; - -/** A representation of the available Azure OpenAI enhancement configurations. */ -export interface AzureChatEnhancementConfiguration { - /** A representation of the available options for the Azure OpenAI grounding enhancement. */ - grounding?: AzureChatGroundingEnhancementConfiguration; - /** A representation of the available options for the Azure OpenAI optical character recognition (OCR) enhancement. */ - ocr?: AzureChatOCREnhancementConfiguration; +export function truncationObjectSerializer(item: TruncationObject) { + return { + type: item["type"], + last_messages: item["lastMessages"], + }; } -/** A representation of the available options for the Azure OpenAI grounding enhancement. */ -export interface AzureChatGroundingEnhancementConfiguration { - /** Specifies whether the enhancement is enabled. */ - enabled: boolean; -} +/** Possible truncation strategies for the thread. */ +export type TruncationStrategy = "auto" | "last_messages"; +/** Specifies how the tool choice will be used. */ +export type AssistantsApiToolChoiceOptionMode = "none" | "auto"; -/** A representation of the available options for the Azure OpenAI optical character recognition (OCR) enhancement. */ -export interface AzureChatOCREnhancementConfiguration { - /** Specifies whether the enhancement is enabled. */ - enabled: boolean; +/** Specifies a tool the model should use. Use to force the model to call a specific tool. */ +export interface AssistantsNamedToolChoice { + /** The type of tool. If type is `function`, the function name must be set. */ + type: AssistantsNamedToolChoiceType; + /** The name of the function to call. */ + function?: FunctionName; } -/** - * An abstract representation of a response format configuration usable by Chat Completions. Can be used to enable JSON - * mode. - */ -export interface ChatCompletionsResponseFormat { - /** the discriminator possible values: text, json_object */ + type: string; } +export function assistantsNamedToolChoiceSerializer( + item: AssistantsNamedToolChoice, +): AssistantsNamedToolChoiceRest { + return { + type: item["type"], + function: !item.function + ? item.function + : functionNameSerializer(item.function), + }; } -/** - * The standard Chat Completions response format that can freely generate text and is not guaranteed to produce response - * content that adheres to a specific schema. - */ -export interface ChatCompletionsTextResponseFormat extends ChatCompletionsResponseFormat { - /** The discriminated object type, which is always 'text' for this format. */ - type: "text"; +/** Available tool types for assistants named tools. */ +export type AssistantsNamedToolChoiceType = + | "function" + | "code_interpreter" + | "file_search"; + +/** The function name that will be used, if using the `function` tool. */ +export interface FunctionName { + /** The name of the function to call. */ + name: string; } -/** A response format for Chat Completions that restricts responses to emitting valid JSON objects. */ -export interface ChatCompletionsJsonResponseFormat extends ChatCompletionsResponseFormat { - /** The discriminated object type, which is always 'json_object' for this format.
*/ - type: "json_object"; +export function functionNameSerializer(item: FunctionName): FunctionNameRest { + return { + name: item["name"], + }; } -/** An abstract representation of a tool that can be used by the model to improve a chat completions response. */ -export interface ChatCompletionsToolDefinition { - /** the discriminator possible values: function */ +/** Data representing a single evaluation run of an assistant thread. */ +export interface ThreadRun { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + /** The object type, which is always 'thread.run'. */ + object: "thread.run"; + /** The ID of the thread associated with this run. */ + threadId: string; + /** The ID of the assistant associated with the thread this run was performed against. */ + assistantId: string; + /** The status of the assistant thread run. */ + status: RunStatus; + /** The details of the action required for the assistant thread run to continue. */ + requiredAction?: RequiredActionUnion | null; + /** The last error, if any, encountered by this assistant thread run. */ + lastError: RunError | null; + /** The ID of the model to use. */ + model: string; + /** The overridden system instructions used for this assistant thread run. */ + instructions: string; + /** The overridden enabled tools used for this assistant thread run. */ + tools: ToolDefinitionUnion[]; + /** The Unix timestamp, in seconds, representing when this object was created. */ + createdAt: Date; + /** The Unix timestamp, in seconds, representing when this item expires. */ + expiresAt: Date | null; + /** The Unix timestamp, in seconds, representing when this item was started. */ + startedAt: Date | null; + /** The Unix timestamp, in seconds, representing when this completed. */ + completedAt: Date | null; + /** The Unix timestamp, in seconds, representing when this was cancelled. */ + cancelledAt: Date | null; + /** The Unix timestamp, in seconds, representing when this failed. */ + failedAt: Date | null; + /** Details on why the run is incomplete. Will be `null` if the run is not incomplete. */ + incompleteDetails: IncompleteRunDetails | null; + /** Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (e.g. `in_progress`, `queued`, etc.). */ + usage: RunCompletionUsage | null; + /** The sampling temperature used for this run. If not set, defaults to 1. */ + temperature?: number | null; + /** The nucleus sampling value used for this run. If not set, defaults to 1. */ + topP?: number | null; + /** The maximum number of prompt tokens specified to have been used over the course of the run. */ + maxPromptTokens: number | null; + /** The maximum number of completion tokens specified to have been used over the course of the run. */ + maxCompletionTokens: number | null; + /** The strategy to use for dropping messages as the context window moves forward. */ + truncationStrategy: TruncationObject | null; + /** Controls whether and which tool is called by the model. */ + toolChoice: + | string + | AssistantsApiToolChoiceOptionMode + | AssistantsNamedToolChoice; + /** The response format of the tool calls used in this run. */ + responseFormat: + | string + | AssistantsApiResponseFormatMode + | AssistantsApiResponseFormat; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length.
*/ + metadata: Record<string, string>; +} + +/** Possible values for the status of an assistant thread run. */ +export type RunStatus = + | "queued" + | "in_progress" + | "requires_action" + | "cancelling" + | "cancelled" + | "failed" + | "completed" + | "expired"; + +/** An abstract representation of a required action for an assistant thread run to continue. */ +export interface RequiredAction { + /** the discriminator possible values: submit_tool_outputs */ type: string; } -/** The definition information for a chat completions function tool that can call a function in response to a tool call. */ -export interface ChatCompletionsFunctionToolDefinition extends ChatCompletionsToolDefinition { - /** The object name, which is always 'function'. */ - type: "function"; - /** The function definition details for the function tool. */ - function: FunctionDefinition; +/** The details for required tool calls that must be submitted for an assistant thread run to continue. */ +export interface SubmitToolOutputsAction extends RequiredAction { + /** The object type, which is always 'submit_tool_outputs'. */ + type: "submit_tool_outputs"; + /** The details describing tools that should be called to submit tool outputs. */ + submitToolOutputs: SubmitToolOutputsDetails; } -/** Represents a generic policy for how a chat completions tool may be selected. */ -/** "auto", "none" */ -export type ChatCompletionsToolSelectionPreset = string; +/** The details describing tools that should be called to submit tool outputs. */ +export interface SubmitToolOutputsDetails { + /** The list of tool calls that must be resolved for the assistant thread run to continue. */ + toolCalls: RequiredToolCallUnion[]; +} -/** An abstract representation of an explicit, named tool selection to use for a chat completions request. */ -export interface ChatCompletionsNamedToolSelection { +/** An abstract representation of a tool invocation needed by the model to continue a run. */ +export interface RequiredToolCall { /** the discriminator possible values: function */ type: string; + /** The ID of the tool call. This ID must be referenced when submitting tool outputs. */ + id: string; } -/** A tool selection of a specific, named function tool that will limit chat completions to using the named function. */ -export interface ChatCompletionsNamedFunctionToolSelection - extends ChatCompletionsNamedToolSelection { - /** The object type, which is always 'function'. */ +/** A representation of a requested call to a function tool, needed by the model to continue evaluation of a run. */ +export interface RequiredFunctionToolCall extends RequiredToolCall { + /** The object type of the required tool call. Always 'function' for function tools. */ type: "function"; - /** The function that should be called. */ - function: ChatCompletionsFunctionToolSelection; + /** Detailed information about the function to be executed by the tool that includes name and arguments. */ + function: RequiredFunctionToolCallDetails; } -/** A tool selection of a specific, named function tool that will limit chat completions to using the named function. */ -export interface ChatCompletionsFunctionToolSelection { - /** The name of the function that should be called. */ +/** The detailed information for a function invocation, as provided by a required action invoking a function tool, that includes the name of and arguments to the function. */ +export interface RequiredFunctionToolCallDetails { + /** The name of the function.
*/ name: string; + /** The arguments to use when invoking the named function, as provided by the model. Arguments are presented as a JSON document that should be validated and parsed for evaluation. */ + arguments: string; } -/** - * Representation of the response data from a chat completions request. - * Completions support a wide variety of tasks and generate text that continues from or "completes" - * provided prompt data. - */ -export interface ChatCompletions { - /** A unique identifier associated with this chat completions response. */ - id: string; - /** The current model used for the chat completions request. */ - model: string; - /** - * The first timestamp associated with generation activity for this completions response, - * represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970. - */ - created: Date; - /** - * The collection of completions choices associated with this completions response. - * Generally, `n` choices are generated per provided prompt with a default value of 1. - * Token limits and other settings may limit the number of choices generated. - */ - choices: ChatChoice[]; - /** - * Content filtering results for zero or more prompts in the request. In a streaming request, - * results for different prompts may arrive at different times or in different orders. - */ - promptFilterResults?: ContentFilterResultsForPrompt[]; - /** - * Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that - * might impact determinism. - */ - systemFingerprint?: string; - /** Usage information for tokens processed and generated as part of this completions operation. */ - usage?: CompletionsUsage; +/** The details of an error as encountered by an assistant thread run. */ +export interface RunError { + /** The status for the error. */ + code: string; + /** The human-readable text associated with the error. */ + message: string; } -/** - * The representation of a single prompt completion as part of an overall chat completions request. - * Generally, `n` choices are generated per provided prompt with a default value of 1. - * Token limits and other settings may limit the number of choices generated. - */ -export interface ChatChoice { - /** The chat message for a given chat completions prompt. */ - message?: ChatResponseMessage; - /** The log probability information for this choice, as enabled via the 'logprobs' request option. */ - logprobs: ChatChoiceLogProbabilityInfo | null; - /** The ordered index associated with this chat completions choice. */ - index: number; - /** The reason that this chat completions choice completed its generated. */ - finishReason: CompletionsFinishReason | null; - /** - * The reason the model stopped generating tokens, together with any applicable details. - * This structured representation replaces 'finish_reason' for some models. - */ - finishDetails?: ChatFinishDetailsUnion; - /** The delta message content for a streaming response. */ - delta?: ChatResponseMessage; - /** - * Information about the content filtering category (hate, sexual, violence, self_harm), if it - * has been detected, as well as the severity level (very_low, low, medium, high-scale that - * determines the intensity and risk level of harmful content) and if it has been filtered or not. - */ - contentFilterResults?: ContentFilterResultsForChoice; - /** - * Represents the output results of Azure OpenAI enhancements to chat completions, as configured via the matching input - * provided in the request. 
This supplementary information is only available when using Azure OpenAI and only when the - * request is configured to use enhancements. - */ - enhancements?: AzureChatEnhancements; +/** The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. */ +export type IncompleteRunDetails = + | "max_completion_tokens" + | "max_prompt_tokens"; + +/** Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (e.g. `in_progress`, `queued`, etc.). */ +export interface RunCompletionUsage { + /** Number of completion tokens used over the course of the run. */ + completionTokens: number; + /** Number of prompt tokens used over the course of the run. */ + promptTokens: number; + /** Total number of tokens used (prompt + completion). */ + totalTokens: number; } -/** A representation of a chat message as received in a response. */ -export interface ChatResponseMessage { - /** The chat role associated with the message. */ - role: ChatRole; - /** The content of the message. */ - content: string | null; - /** - * The tool calls that must be resolved and have their outputs appended to subsequent input messages for the chat - * completions request to resolve as configured. +/** The response data for a requested list of items. */ +export interface OpenAIPageableListOfThreadRun { + /** The object type, which is always list. */ + object: "list"; + /** The requested list of items. */ + data: ThreadRun[]; + /** The first ID represented in this list. */ + firstId: string; + /** The last ID represented in this list. */ + lastId: string; + /** A value indicating whether there are additional values available not captured in this list. */ + hasMore: boolean; +} + +/** The data provided during a tool outputs submission to resolve pending tool calls and allow the model to continue. */ +export interface ToolOutput { + /** The ID of the tool call being resolved, as provided in the tool calls of a required action from a run. */ + toolCallId?: string; + /** The output from the tool to be submitted. */ + output?: string; +} + +export function toolOutputSerializer(item: ToolOutput): ToolOutputRest { + return { + tool_call_id: item["toolCallId"], + output: item["output"], + }; +} + +/** The details used when creating and immediately running a new assistant thread. */ +export interface CreateAndRunThreadOptions { + /** The ID of the assistant for which the thread should be created. */ + assistantId: string; + /** The details used to create the new thread. If no thread is provided, an empty one will be created. */ + thread?: AssistantThreadCreationOptions; + /** The overridden model that the assistant should use to run the thread. */ + model?: string | null; + /** The overridden system instructions the assistant should use to run the thread. */ + instructions?: string | null; + /** The overridden list of enabled tools the assistant should use to run the thread. */ + tools?: ToolDefinitionUnion[] | null; + /** Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. */ + toolResources?: UpdateToolResourcesOptions | null; + /** + * If `true`, returns a stream of events that happen during the Run as server-sent events, + * terminating when the Run enters a terminal state with a `data: [DONE]` message.
*/ - toolCalls?: ChatCompletionsToolCallUnion[]; + stream?: boolean; /** - * The function call that must be resolved and have its output appended to subsequent input messages for the chat - * completions request to resolve as configured. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. */ - functionCall?: FunctionCall; + temperature?: number | null; /** - * If Azure OpenAI chat extensions are configured, this array represents the incremental steps performed by those - * extensions while processing the chat completions request. - */ - context?: AzureChatExtensionsMessageContext; + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + * comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + topP?: number | null; + /** + * The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only + * the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. + */ + maxPromptTokens?: number | null; + /** + * The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only + * the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens + * specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + */ + maxCompletionTokens?: number | null; + /** The strategy to use for dropping messages as the context window moves forward. */ + truncationStrategy?: TruncationObject | null; + /** Controls whether and which tool is called by the model. */ + toolChoice?: + | string + | AssistantsApiToolChoiceOptionMode + | AssistantsNamedToolChoice; + /** Specifies the format that the model must output. */ + responseFormat?: + | string + | AssistantsApiResponseFormatMode + | AssistantsApiResponseFormat; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record<string, string>; +} + +export function createAndRunThreadOptionsSerializer( + item: CreateAndRunThreadOptions, +): CreateAndRunThreadOptionsRest { + return { + assistant_id: item["assistantId"], + thread: !item.thread + ? item.thread + : assistantThreadCreationOptionsSerializer(item.thread), + model: item["model"], + instructions: item["instructions"], + tools: item["tools"], + tool_resources: !item.toolResources + ? item.toolResources + : updateToolResourcesOptionsSerializer(item.toolResources), + stream: item["stream"], + temperature: item["temperature"], + top_p: item["topP"], + max_prompt_tokens: item["maxPromptTokens"], + max_completion_tokens: item["maxCompletionTokens"], + truncation_strategy: !item.truncationStrategy + ? item.truncationStrategy + : truncationObjectSerializer(item.truncationStrategy), + tool_choice: item["toolChoice"], + response_format: item["responseFormat"], + metadata: !item.metadata + ?
item.metadata + : (serializeRecord(item.metadata as any) as any), + }; +} + +/** Detailed information about a single step of an assistant thread run. */ +export interface RunStep { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + /** The object type, which is always 'thread.run.step'. */ + object: "thread.run.step"; + /** The type of run step, which can be either message_creation or tool_calls. */ + type: RunStepType; + /** The ID of the assistant associated with the run step. */ + assistantId: string; + /** The ID of the thread that was run. */ + threadId: string; + /** The ID of the run that this run step is a part of. */ + runId: string; + /** The status of this run step. */ + status: RunStepStatus; + /** The details for this run step. */ + stepDetails: RunStepDetailsUnion; + /** If applicable, information about the last error encountered by this run step. */ + lastError: RunStepError | null; + /** The Unix timestamp, in seconds, representing when this object was created. */ + createdAt: Date; + /** The Unix timestamp, in seconds, representing when this item expired. */ + expiredAt: Date | null; + /** The Unix timestamp, in seconds, representing when this completed. */ + completedAt: Date | null; + /** The Unix timestamp, in seconds, representing when this was cancelled. */ + cancelledAt: Date | null; + /** The Unix timestamp, in seconds, representing when this failed. */ + failedAt: Date | null; + /** Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. */ + usage?: RunStepCompletionUsage | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata: Record<string, string>; +} + +/** The possible types of run steps. */ +export type RunStepType = "message_creation" | "tool_calls"; +/** Possible values for the status of a run step. */ +export type RunStepStatus = + | "in_progress" + | "cancelled" + | "failed" + | "completed" + | "expired"; + +/** An abstract representation of the details for a run step. */ +export interface RunStepDetails { + /** the discriminator possible values: message_creation, tool_calls */ + type: RunStepType; +} + +/** The detailed information associated with a message creation run step. */ +export interface RunStepMessageCreationDetails extends RunStepDetails { + /** The object type, which is always 'message_creation'. */ + type: "message_creation"; + /** Information about the message creation associated with this run step. */ + messageCreation: RunStepMessageCreationReference; +} + +/** The details of a message created as a part of a run step. */ +export interface RunStepMessageCreationReference { + /** The ID of the message created by this run step. */ + messageId: string; +} + +/** The detailed information associated with a run step calling tools. */ +export interface RunStepToolCallDetails extends RunStepDetails { + /** The object type, which is always 'tool_calls'. */ + type: "tool_calls"; + /** A list of tool call details for this run step. */ + toolCalls: RunStepToolCallUnion[]; +} + +/** An abstract representation of a detailed tool call as recorded within a run step for an existing run. */ +export interface RunStepToolCall { + /** the discriminator possible values: code_interpreter, file_search, function */ + type: string; + /** The ID of the tool call.
+ +/** Detailed information about a single step of an assistant thread run. */ +export interface RunStep { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + /** The object type, which is always 'thread.run.step'. */ + object: "thread.run.step"; + /** The type of run step, which can be either message_creation or tool_calls. */ + type: RunStepType; + /** The ID of the assistant associated with the run step. */ + assistantId: string; + /** The ID of the thread that was run. */ + threadId: string; + /** The ID of the run that this run step is a part of. */ + runId: string; + /** The status of this run step. */ + status: RunStepStatus; + /** The details for this run step. */ + stepDetails: RunStepDetailsUnion; + /** If applicable, information about the last error encountered by this run step. */ + lastError: RunStepError | null; + /** The Unix timestamp, in seconds, representing when this object was created. */ + createdAt: Date; + /** The Unix timestamp, in seconds, representing when this item expired. */ + expiredAt: Date | null; + /** The Unix timestamp, in seconds, representing when this completed. */ + completedAt: Date | null; + /** The Unix timestamp, in seconds, representing when this was cancelled. */ + cancelledAt: Date | null; + /** The Unix timestamp, in seconds, representing when this failed. */ + failedAt: Date | null; + /** Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. */ + usage?: RunStepCompletionUsage | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata: Record<string, string>; +} + +/** The possible types of run steps. */ +export type RunStepType = "message_creation" | "tool_calls"; +/** Possible values for the status of a run step. */ +export type RunStepStatus = + | "in_progress" + | "cancelled" + | "failed" + | "completed" + | "expired"; + +/** An abstract representation of the details for a run step. */ +export interface RunStepDetails { + /** the discriminator possible values: message_creation, tool_calls */ + type: RunStepType; +} + +/** The detailed information associated with a message creation run step. */ +export interface RunStepMessageCreationDetails extends RunStepDetails { + /** The object type, which is always 'message_creation'. */ + type: "message_creation"; + /** Information about the message creation associated with this run step. */ + messageCreation: RunStepMessageCreationReference; +} + +/** The details of a message created as a part of a run step. */ +export interface RunStepMessageCreationReference { + /** The ID of the message created by this run step. */ + messageId: string; +} + +/** The detailed information associated with a run step calling tools. */ +export interface RunStepToolCallDetails extends RunStepDetails { + /** The object type, which is always 'tool_calls'. */ + type: "tool_calls"; + /** A list of tool call details for this run step. */ + toolCalls: RunStepToolCallUnion[]; +} + +/** An abstract representation of a detailed tool call as recorded within a run step for an existing run. */ +export interface RunStepToolCall { + /** the discriminator possible values: code_interpreter, file_search, function */ + type: string; + /** The ID of the tool call. This ID must be referenced when you submit tool outputs. */ + id: string; } /** - * A representation of the additional context information available when Azure OpenAI chat extensions are involved - * in the generation of a corresponding chat completions response. This context information is only populated when - * using an Azure OpenAI request configured to use a matching extension. + * A record of a call to a code interpreter tool, issued by the model in evaluation of a defined tool, that + * represents inputs and outputs consumed and emitted by the code interpreter. */ -export interface AzureChatExtensionsMessageContext { - /** - * The contextual information associated with the Azure chat extensions used for a chat completions request. - * These messages describe the data source retrievals, plugin invocations, and other intermediate steps taken in the - * course of generating a chat completions response that was augmented by capabilities from Azure OpenAI chat - * extensions. - */ - citations?: AzureChatExtensionDataSourceResponseCitation[]; - /** The detected intent from the chat history, used to pass to the next turn to carry over the context. */ - intent?: string; +export interface RunStepCodeInterpreterToolCall extends RunStepToolCall { + /** The object type, which is always 'code_interpreter'. */ + type: "code_interpreter"; + /** The details of the tool call to the code interpreter tool. */ + codeInterpreter: RunStepCodeInterpreterToolCallDetails; } -/** - * A single instance of additional context information available when Azure OpenAI chat extensions are involved - * in the generation of a corresponding chat completions response. This context information is only populated when - * using an Azure OpenAI request configured to use a matching extension. - */ -export interface AzureChatExtensionDataSourceResponseCitation { - /** The content of the citation. */ - content: string; - /** The title of the citation. */ - title?: string; - /** The URL of the citation. */ - url?: string; - /** The file path of the citation. */ - filepath?: string; - /** The chunk ID of the citation. */ - chunkId?: string; -} - -/** Log probability information for a choice, as requested via 'logprobs' and 'top_logprobs'. */ -export interface ChatChoiceLogProbabilityInfo { - /** The list of log probability information entries for the choice's message content tokens, as requested via the 'logprobs' option. */ - content: ChatTokenLogProbabilityResult[] | null; -} - -/** A representation of the log probability information for a single content token, including a list of most likely tokens if 'top_logprobs' were requested. */ -export interface ChatTokenLogProbabilityResult { - /** The message content token. */ - token: string; - /** The log probability of the message content token. */ - logprob: number; - /** A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token. */ - bytes: number[] | null; - /** The list of most likely tokens and their log probability information, as requested via 'top_logprobs'. */ - topLogprobs: ChatTokenLogProbabilityInfo[] | null; -} - -/** A representation of the log probability information for a single message content token. */ -export interface ChatTokenLogProbabilityInfo { - /** The message content token.
*/ - token: string; - /** The log probability of the message content token. */ - logprob: number; - /** A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token. */ - bytes: number[] | null; -} - -/** An abstract representation of structured information about why a chat completions response terminated. */ -export interface ChatFinishDetails { - /** the discriminator possible values: stop, max_tokens */ +/** The detailed information about a code interpreter invocation by the model. */ +export interface RunStepCodeInterpreterToolCallDetails { + /** The input provided by the model to the code interpreter tool. */ + input: string; + /** The outputs produced by the code interpreter tool back to the model in response to the tool call. */ + outputs: RunStepCodeInterpreterToolCallOutputUnion[]; +} + +/** An abstract representation of an emitted output from a code interpreter tool. */ +export interface RunStepCodeInterpreterToolCallOutput { + /** the discriminator possible values: logs, image */ type: string; } -/** A structured representation of a stop reason that signifies natural termination by the model. */ -export interface StopFinishDetails extends ChatFinishDetails { - /** The object type, which is always 'stop' for this object. */ - type: "stop"; - /** The token sequence that the model terminated with. */ - stop: string; +/** A representation of a log output emitted by a code interpreter tool in response to a tool call by the model. */ +export interface RunStepCodeInterpreterLogOutput + extends RunStepCodeInterpreterToolCallOutput { + /** The object type, which is always 'logs'. */ + type: "logs"; + /** The serialized log output emitted by the code interpreter. */ + logs: string; +} + +/** A representation of an image output emitted by a code interpreter tool in response to a tool call by the model. */ +export interface RunStepCodeInterpreterImageOutput + extends RunStepCodeInterpreterToolCallOutput { + /** The object type, which is always 'image'. */ + type: "image"; + /** Referential information for the image associated with this output. */ + image: RunStepCodeInterpreterImageReference; +} + +/** An image reference emitted by a code interpreter tool in response to a tool call by the model. */ +export interface RunStepCodeInterpreterImageReference { + /** The ID of the file associated with this image. */ + fileId: string; } /** - * A structured representation of a stop reason that signifies a token limit was reached before the model could naturally - * complete. + * A record of a call to a file search tool, issued by the model in evaluation of a defined tool, that represents + * executed file search. */ -export interface MaxTokensFinishDetails extends ChatFinishDetails { - /** The object type, which is always 'max_tokens' for this object. */ - type: "max_tokens"; +export interface RunStepFileSearchToolCall extends RunStepToolCall { + /** The object type, which is always 'file_search'. */ + type: "file_search"; + /** Reserved for future use. */ + fileSearch: Record<string, string>; } /** - * Represents the output results of Azure enhancements to chat completions, as configured via the matching input provided - * in the request.
+ * A record of a call to a function tool, issued by the model in evaluation of a defined tool, that represents the inputs + * and output consumed and emitted by the specified function. */ -export interface AzureChatEnhancements { - /** The grounding enhancement that returns the bounding box of the objects detected in the image. */ - grounding?: AzureGroundingEnhancement; +export interface RunStepFunctionToolCall extends RunStepToolCall { + /** The object type, which is always 'function'. */ + type: "function"; + /** The detailed information about the function called by the model. */ + function: RunStepFunctionToolCallDetails; } -/** The grounding enhancement that returns the bounding box of the objects detected in the image. */ -export interface AzureGroundingEnhancement { - /** The lines of text detected by the grounding enhancement. */ - lines: AzureGroundingEnhancementLine[]; +/** The detailed information about the function called by the model. */ +export interface RunStepFunctionToolCallDetails { + /** The name of the function. */ + name: string; + /** The arguments that the model requires to be provided to the named function. */ + arguments: string; + /** The output of the function, only populated for function calls that have already had their outputs submitted. */ + output: string | null; } -/** A content line object consisting of an adjacent sequence of content elements, such as words and selection marks. */ -export interface AzureGroundingEnhancementLine { - /** The text within the line. */ - text: string; - /** An array of spans that represent detected objects and its bounding box information. */ - spans: AzureGroundingEnhancementLineSpan[]; +/** The error information associated with a failed run step. */ +export interface RunStepError { + /** The error code for this error. */ + code: RunStepErrorCode; + /** The human-readable text associated with this error. */ + message: string; } -/** A span object that represents a detected object and its bounding box information. */ -export interface AzureGroundingEnhancementLineSpan { - /** The text content of the span that represents the detected object. */ - text: string; - /** - * The character offset within the text where the span begins. This offset is defined as the position of the first - * character of the span, counting from the start of the text as Unicode codepoints. - */ - offset: number; - /** The length of the span in characters, measured in Unicode codepoints. */ - length: number; - /** An array of objects representing points in the polygon that encloses the detected object. */ - polygon: AzureGroundingEnhancementCoordinatePoint[]; +/** Possible error code values attributable to a failed run step. */ +export type RunStepErrorCode = "server_error" | "rate_limit_exceeded"; + +/** Usage statistics related to the run step. */ +export interface RunStepCompletionUsage { + /** Number of completion tokens used over the course of the run step. */ + completionTokens: number; + /** Number of prompt tokens used over the course of the run step. */ + promptTokens: number; + /** Total number of tokens used (prompt + completion). */ + totalTokens: number; } -/** A representation of a single polygon point as used by the Azure grounding enhancement. */ -export interface AzureGroundingEnhancementCoordinatePoint { - /** The x-coordinate (horizontal axis) of the point. */ - x: number; - /** The y-coordinate (vertical axis) of the point. */ - y: number; +/** The response data for a requested list of items.
*/ +export interface OpenAIPageableListOfRunStep { + /** The object type, which is always list. */ + object: "list"; + /** The requested list of items. */ + data: RunStep[]; + /** The first ID represented in this list. */ + firstId: string; + /** The last ID represented in this list. */ + lastId: string; + /** A value indicating whether there are additional values available not captured in this list. */ + hasMore: boolean; +} + +/** The possible values denoting the intended usage of a file. */ +export type FilePurpose = + | "fine-tune" + | "fine-tune-results" + | "assistants" + | "assistants_output" + | "batch" + | "batch_output" + | "vision"; + +/** The response data from a file list operation. */ +export interface FileListResponse { + /** The object type, which is always 'list'. */ + object: "list"; + /** The files returned for the request. */ + data: OpenAIFile[]; +} + +/** Represents a file uploaded to the service. */ +export interface OpenAIFile { + /** The object type, which is always 'file'. */ + object: "file"; + /** The identifier, which can be referenced in API endpoints. */ + id: string; + /** The size of the file, in bytes. */ + bytes: number; + /** The name of the file. */ + filename: string; + /** The Unix timestamp, in seconds, representing when this object was created. */ + createdAt: Date; + /** The intended purpose of a file. */ + purpose: FilePurpose; + /** The state of the file. This field is available in Azure OpenAI only. */ + status?: FileState; + /** The error message with details in case processing of this file failed. This field is available in Azure OpenAI only. */ + statusDetails?: string; +} + +/** The state of the file. */ +export type FileState = + | "uploaded" + | "pending" + | "running" + | "processed" + | "error" + | "deleting" + | "deleted"; + +/** A status response from a file deletion operation. */ +export interface FileDeletionStatus { + /** The ID of the resource specified for deletion. */ + id: string; + /** A value indicating whether deletion was successful. */ + deleted: boolean; + /** The object type, which is always 'file'. */ + object: "file"; +} + +/** The response data for a requested list of items. */ +export interface OpenAIPageableListOfVectorStore { + /** The object type, which is always list. */ + object: "list"; + /** The requested list of items. */ + data: VectorStore[]; + /** The first ID represented in this list. */ + firstId: string; + /** The last ID represented in this list. */ + lastId: string; + /** A value indicating whether there are additional values available not captured in this list. */ + hasMore: boolean; +} + +/** A vector store is a collection of processed files that can be used by the `file_search` tool. */ +export interface VectorStore { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + /** The object type, which is always `vector_store`. */ + object: "vector_store"; + /** The Unix timestamp (in seconds) for when the vector store was created. */ + createdAt: Date; + /** The name of the vector store. */ + name: string; + /** The total number of bytes used by the files in the vector store. */ + usageBytes: number; + /** Counts of files processed or being processed by this vector store, grouped by status. */ + fileCounts: VectorStoreFileCount; + /** The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use.
*/ + status: VectorStoreStatus; + /** Details on when this vector store expires. */ + expiresAfter?: VectorStoreExpirationPolicy; + /** The Unix timestamp (in seconds) for when the vector store will expire. */ + expiresAt?: Date | null; + /** The Unix timestamp (in seconds) for when the vector store was last active. */ + lastActiveAt: Date | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata: Record<string, string>; +} + +/** Counts of files processed or being processed by this vector store grouped by status. */ +export interface VectorStoreFileCount { + /** The number of files that are currently being processed. */ + inProgress: number; + /** The number of files that have been successfully processed. */ + completed: number; + /** The number of files that have failed to process. */ + failed: number; + /** The number of files that were cancelled. */ + cancelled: number; + /** The total number of files. */ + total: number; +} + +/** Vector store possible status */ +export type VectorStoreStatus = "expired" | "in_progress" | "completed"; + +/** The expiration policy for a vector store. */ +export interface VectorStoreExpirationPolicy { + /** Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. */ + anchor: VectorStoreExpirationPolicyAnchor; + /** The number of days after the anchor time that the vector store will expire. */ + days: number; +} + +export function vectorStoreExpirationPolicySerializer( + item: VectorStoreExpirationPolicy, +): VectorStoreExpirationPolicyRest { + return { + anchor: item["anchor"], + days: item["days"], + }; +} + +/** Describes the relationship between the days and the expiration of this vector store */ +export type VectorStoreExpirationPolicyAnchor = "last_active_at"; + +/** Request object for creating a vector store. */ +export interface VectorStoreOptions { + /** A list of file IDs that the vector store should use. Useful for tools like `file_search` that can access files. */ + fileIds?: string[]; + /** The name of the vector store. */ + name?: string; + /** Details on when this vector store expires. */ + expiresAfter?: VectorStoreExpirationPolicy; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record<string, string>; +} + +export function vectorStoreOptionsSerializer( + item: VectorStoreOptions, +): VectorStoreOptionsRest { + return { + file_ids: item["fileIds"], + name: item["name"], + expires_after: !item.expiresAfter + ? item.expiresAfter + : vectorStoreExpirationPolicySerializer(item.expiresAfter), + metadata: !item.metadata + ? item.metadata + : (serializeRecord(item.metadata as any) as any), + }; +} + +/** Request object for updating a vector store. */ +export interface VectorStoreUpdateOptions { + /** The name of the vector store. */ + name?: string | null; + /** Details on when this vector store expires. */ + expiresAfter?: VectorStoreExpirationPolicy | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length.
*/ + metadata?: Record<string, string>; +} + +export function vectorStoreUpdateOptionsSerializer( + item: VectorStoreUpdateOptions, +): VectorStoreUpdateOptionsRest { + return { + name: item["name"], + expires_after: !item.expiresAfter + ? item.expiresAfter + : vectorStoreExpirationPolicySerializer(item.expiresAfter), + metadata: !item.metadata + ? item.metadata + : (serializeRecord(item.metadata as any) as any), + }; +} + +/** Response object for deleting a vector store. */ +export interface VectorStoreDeletionStatus { + /** The ID of the resource specified for deletion. */ + id: string; + /** A value indicating whether deletion was successful. */ + deleted: boolean; + /** The object type, which is always 'vector_store.deleted'. */ + object: "vector_store.deleted"; +} + +/** Query parameter filter for vector store file retrieval endpoint */ +export type VectorStoreFileStatusFilter = + | "in_progress" + | "completed" + | "failed" + | "cancelled"; + +/** The response data for a requested list of items. */ +export interface OpenAIPageableListOfVectorStoreFile { + /** The object type, which is always list. */ + object: "list"; + /** The requested list of items. */ + data: VectorStoreFile[]; + /** The first ID represented in this list. */ + firstId: string; + /** The last ID represented in this list. */ + lastId: string; + /** A value indicating whether there are additional values available not captured in this list. */ + hasMore: boolean; +} + +/** Description of a file attached to a vector store. */ +export interface VectorStoreFile { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + /** The object type, which is always `vector_store.file`. */ + object: "vector_store.file"; + /** + * The total vector store usage in bytes. Note that this may be different from the original file + * size. + */ + usageBytes: number; + /** The Unix timestamp (in seconds) for when the vector store file was created. */ + createdAt: Date; + /** The ID of the vector store that the file is attached to. */ + vectorStoreId: string; + /** The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. */ + status: VectorStoreFileStatus; + /** The last error associated with this vector store file. Will be `null` if there are no errors. */ + lastError: VectorStoreFileError | null; +} + +/** Vector store file status */ +export type VectorStoreFileStatus = + | "in_progress" + | "completed" + | "failed" + | "cancelled"; + +/** Details on the error that may have occurred while processing a file for this vector store */ +export interface VectorStoreFileError { + /** One of `internal_error`, `file_not_found`, `parsing_error`, or `unhandled_mime_type`. */ + code: VectorStoreFileErrorCode; + /** A human-readable description of the error. */ + message: string; +} + +/** Error code variants for vector store file processing */ +export type VectorStoreFileErrorCode = + | "internal_error" + | "file_not_found" + | "parsing_error" + | "unhandled_mime_type"; + +/** Response object for deleting a vector store file relationship. */ +export interface VectorStoreFileDeletionStatus { + /** The ID of the resource specified for deletion. */ + id: string; + /** A value indicating whether deletion was successful. */ + deleted: boolean; + /** The object type, which is always 'vector_store.file.deleted'. */ + object: "vector_store.file.deleted"; }
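As a sketch of how the vector store request options and expiration policy defined above compose (the store name and file ID are illustrative, not real identifiers):

const storeRequest: VectorStoreOptions = {
  name: "product-docs",
  fileIds: ["file_abc123"], // hypothetical uploaded file ID
  // Expire the store seven days after it was last active:
  expiresAfter: { anchor: "last_active_at", days: 7 },
};
// vectorStoreOptionsSerializer maps this to the wire shape:
// { name, file_ids, expires_after: { anchor, days }, metadata }
const wireBody = vectorStoreOptionsSerializer(storeRequest);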
-/** Represents the request data used to generate images. */ -export interface ImageGenerationOptions { - /** - * The model name or Azure OpenAI model deployment name to use for image generation. If not specified, dall-e-2 will be - * inferred as a default. - */ - model?: string; - /** A description of the desired images. */ - prompt: string; - /** - * The number of images to generate. - * Dall-e-2 models support values between 1 and 10. - * Dall-e-3 models only support a value of 1. - */ - n?: number; - /** - * The desired dimensions for generated images. - * Dall-e-2 models support 256x256, 512x512, or 1024x1024. - * Dall-e-3 models support 1024x1024, 1792x1024, or 1024x1792. - */ - size?: ImageSize; - /** The format in which image generation response items should be presented. */ - responseFormat?: ImageGenerationResponseFormat; - /** - * The desired image generation quality level to use. - * Only configurable with dall-e-3 models. - */ - quality?: ImageGenerationQuality; - /** - * The desired image generation style to use. - * Only configurable with dall-e-3 models. - */ - style?: ImageGenerationStyle; - /** A unique identifier representing your end-user, which can help to monitor and detect abuse. */ - user?: string; +/** A batch of files attached to a vector store. */ +export interface VectorStoreFileBatch { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + /** The object type, which is always `vector_store.files_batch`. */ + object: "vector_store.files_batch"; + /** The Unix timestamp (in seconds) for when the vector store files batch was created. */ + createdAt: Date; + /** The ID of the vector store that the file is attached to. */ + vectorStoreId: string; + /** The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. */ + status: VectorStoreFileBatchStatus; + /** Counts of files processed or being processed by this vector store, grouped by status. */ + fileCounts: VectorStoreFileCount; +} + +/** The status of the vector store file batch. */ +export type VectorStoreFileBatchStatus = + | "in_progress" + | "completed" + | "cancelled" + | "failed"; + +/** Represents a message delta, i.e., any changed fields on a message during streaming. */ +export interface MessageDeltaChunk { + /** The identifier of the message, which can be referenced in API endpoints. */ + id: string; + /** The object type, which is always `thread.message.delta`. */ + object: "thread.message.delta"; + /** The delta containing the fields that have changed on the Message. */ + delta: MessageDelta; } -/** The desired size of generated images. */ -/** "256x256", "512x512", "1024x1024", "1792x1024", "1024x1792" */ -export type ImageSize = string; -/** The format in which the generated images are returned. */ -/** "url", "b64_json" */ -export type ImageGenerationResponseFormat = string; -/** - * An image generation configuration that specifies how the model should prioritize quality, cost, and speed. - * Only configurable with dall-e-3 models. - */ -/** "standard", "hd" */ -export type ImageGenerationQuality = string; -/** - * An image generation configuration that specifies how the model should incorporate realism and other visual characteristics. - * Only configurable with dall-e-3 models. - */ -/** "natural", "vivid" */ -export type ImageGenerationStyle = string; +/** Represents the typed 'delta' payload within a streaming message delta chunk. */ +export interface MessageDelta { + /** The entity that produced the message.
*/ + role: MessageRole; + /** The content of the message as an array of text and/or images. */ + content: MessageDeltaContentUnion[]; +} -/** The result of a successful image generation operation. */ -export interface ImageGenerations { - /** - * A timestamp representing when this operation was started. - * Expressed in seconds since the Unix epoch of 1970-01-01T00:00:00+0000. - */ - created: Date; - /** The images generated by the operation. */ - data: ImageGenerationData[]; +/** The abstract base representation of a partial streamed message content payload. */ +export interface MessageDeltaContent { + /** The index of the content part of the message. */ + index: number; + /** the discriminator possible values: image_file, text */ + type: string; } -/** - * A representation of a single generated image, provided as either base64-encoded data or as a URL from which the image - * may be retrieved. - */ -export interface ImageGenerationData { - /** The URL that provides temporary access to download the generated image. */ - url?: string; - /** The complete data for an image, represented as a base64-encoded string. */ - base64Data?: string; - /** Information about the content filtering results. */ - contentFilterResults?: ImageGenerationContentFilterResults; - /** - * The final prompt used by the model to generate the image. - * Only provided with dall-3-models and only when revisions were made to the prompt. - */ - revisedPrompt?: string; - /** - * Information about the content filtering category (hate, sexual, violence, self_harm), if - * it has been detected, as well as the severity level (very_low, low, medium, high-scale - * that determines the intensity and risk level of harmful content) and if it has been - * filtered or not. Information about jailbreak content and profanity, if it has been detected, - * and if it has been filtered or not. And information about customer block list, if it has - * been filtered and its id. - */ - promptFilterResults?: ImageGenerationPromptFilterResults; +/** Represents a streamed image file content part within a streaming message delta chunk. */ +export interface MessageDeltaImageFileContent extends MessageDeltaContent { + /** The type of content for this content part, which is always "image_file." */ + type: "image_file"; + /** The image_file data. */ + imageFile?: MessageDeltaImageFileContentObject; } -/** Describes the content filtering result for the image generation request. */ -export interface ImageGenerationContentFilterResults { - /** - * Describes language related to anatomical organs and genitals, romantic relationships, - * acts portrayed in erotic or affectionate terms, physical sexual acts, including - * those portrayed as an assault or a forced sexual violent act against one’s will, - * prostitution, pornography, and abuse. - */ - sexual?: ContentFilterResult; - /** - * Describes language related to physical actions intended to hurt, injure, damage, or - * kill someone or something; describes weapons, etc. - */ - violence?: ContentFilterResult; - /** - * Describes language attacks or uses that include pejorative or discriminatory language - * with reference to a person or identity group on the basis of certain differentiating - * attributes of these groups including but not limited to race, ethnicity, nationality, - * gender identity and expression, sexual orientation, religion, immigration status, ability - * status, personal appearance, and body size. 
- */ - hate?: ContentFilterResult; - /** - * Describes language related to physical actions intended to purposely hurt, injure, - * or damage one’s body, or kill oneself. - */ - selfHarm?: ContentFilterResult; +/** Represents the 'image_file' payload within streaming image file content. */ +export interface MessageDeltaImageFileContentObject { + /** The file ID of the image in the message content. */ + fileId?: string; } -/** Describes the content filtering results for the prompt of a image generation request. */ -export interface ImageGenerationPromptFilterResults { - /** - * Describes language related to anatomical organs and genitals, romantic relationships, - * acts portrayed in erotic or affectionate terms, physical sexual acts, including - * those portrayed as an assault or a forced sexual violent act against one’s will, - * prostitution, pornography, and abuse. - */ - sexual?: ContentFilterResult; - /** - * Describes language related to physical actions intended to hurt, injure, damage, or - * kill someone or something; describes weapons, etc. - */ - violence?: ContentFilterResult; - /** - * Describes language attacks or uses that include pejorative or discriminatory language - * with reference to a person or identity group on the basis of certain differentiating - * attributes of these groups including but not limited to race, ethnicity, nationality, - * gender identity and expression, sexual orientation, religion, immigration status, ability - * status, personal appearance, and body size. - */ - hate?: ContentFilterResult; - /** - * Describes language related to physical actions intended to purposely hurt, injure, - * or damage one’s body, or kill oneself. - */ - selfHarm?: ContentFilterResult; - /** Describes whether profanity was detected. */ - profanity?: ContentFilterDetectionResult; - /** Whether a jailbreak attempt was detected in the prompt. */ - jailbreak?: ContentFilterDetectionResult; +/** Represents a streamed text content part within a streaming message delta chunk. */ +export interface MessageDeltaTextContentObject extends MessageDeltaContent { + /** The type of content for this content part, which is always "text." */ + type: "text"; + /** The text content details. */ + text?: MessageDeltaTextContent; } -/** - * The configuration information for an embeddings request. - * Embeddings measure the relatedness of text strings and are commonly used for search, clustering, - * recommendations, and other similar scenarios. - */ -export interface EmbeddingsOptions { - /** - * An identifier for the caller or end user of the operation. This may be used for tracking - * or rate-limiting purposes. - */ - user?: string; - /** - * The model name to provide as part of this embeddings request. - * Not applicable to Azure OpenAI, where deployment information should be included in the Azure - * resource URI that's connected to. - */ - model?: string; - /** - * Input texts to get embeddings for, encoded as a an array of strings. - * Each input must not exceed 2048 tokens in length. - * - * Unless you are embedding code, we suggest replacing newlines (\\n) in your input with a single space, - * as we have observed inferior results when newlines are present. - */ - input: string[]; - /** The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. */ - dimensions?: number; +/** Represents the data of a streamed text content part within a streaming message delta chunk. 
*/ +export interface MessageDeltaTextContent { + /** The data that makes up the text. */ + value?: string; + /** Annotations for the text. */ + annotations?: MessageDeltaTextAnnotationUnion[]; } -/** - * Representation of the response data from an embeddings request. - * Embeddings measure the relatedness of text strings and are commonly used for search, clustering, - * recommendations, and other similar scenarios. - */ -export interface Embeddings { - /** Embedding values for the prompts submitted in the request. */ - data: EmbeddingItem[]; - /** Usage counts for tokens input using the embeddings API. */ - usage: EmbeddingsUsage; +/** The abstract base representation of a streamed text content part's text annotation. */ +export interface MessageDeltaTextAnnotation { + /** The index of the annotation within a text content part. */ + index: number; + /** the discriminator possible values: file_citation, file_path */ + type: string; } -/** Representation of a single embeddings relatedness comparison. */ -export interface EmbeddingItem { - /** - * List of embeddings value for the input prompt. These represent a measurement of the - * vector-based relatedness of the provided input. - */ - embedding: number[]; - /** Index of the prompt to which the EmbeddingItem corresponds. */ +/** Represents a streamed file citation applied to a streaming text content part. */ +export interface MessageDeltaTextFileCitationAnnotationObject + extends MessageDeltaTextAnnotation { + /** The type of the text content annotation, which is always "file_citation." */ + type: "file_citation"; + /** The file citation information. */ + fileCitation?: MessageDeltaTextFileCitationAnnotation; + /** The text in the message content that needs to be replaced. */ + text?: string; + /** The start index of this annotation in the content text. */ + startIndex?: number; + /** The end index of this annotation in the content text. */ + endIndex?: number; +} + +/** Represents the data of a streamed file citation as applied to a streaming text content part. */ +export interface MessageDeltaTextFileCitationAnnotation { + /** The ID of the specific file the citation is from. */ + fileId?: string; + /** The specific quote in the cited file. */ + quote?: string; +} + +/** Represents a streamed file path annotation applied to a streaming text content part. */ +export interface MessageDeltaTextFilePathAnnotationObject + extends MessageDeltaTextAnnotation { + /** The type of the text content annotation, which is always "file_path." */ + type: "file_path"; + /** The file path information. */ + filePath?: MessageDeltaTextFilePathAnnotation; + /** The start index of this annotation in the content text. */ + startIndex?: number; + /** The end index of this annotation in the content text. */ + endIndex?: number; + /** The text in the message content that needs to be replaced. */ + text?: string; +} + +/** Represents the data of a streamed file path annotation as applied to a streaming text content part. */ +export interface MessageDeltaTextFilePathAnnotation { + /** The file ID for the annotation. */ + fileId?: string; +}
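A sketch of how the streamed text deltas above might be accumulated on the client; the event plumbing that yields each MessageDeltaChunk is omitted, and the type assertion assumes the "text" discriminator declared above:

// Append any text fragments carried by one streamed delta chunk to a buffer.
function appendMessageDelta(buffer: string, chunk: MessageDeltaChunk): string {
  // Defensive fallback: a sparse delta may omit content on the wire.
  const parts = chunk.delta.content ?? [];
  for (const part of parts) {
    if (part.type === "text") {
      const textPart = part as MessageDeltaTextContentObject;
      buffer += textPart.text?.value ?? "";
    }
  }
  return buffer;
}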
+ +/** Represents a run step delta, i.e., any changed fields on a run step during streaming. */ +export interface RunStepDeltaChunk { + /** The identifier of the run step, which can be referenced in API endpoints. */ + id: string; + /** The object type, which is always `thread.run.step.delta`. */ + object: "thread.run.step.delta"; + /** The delta containing the fields that have changed on the run step. */ + delta: RunStepDelta; +} + +/** Represents the delta payload in a streaming run step delta chunk. */ +export interface RunStepDelta { + /** The details of the run step. */ + stepDetails?: RunStepDeltaDetailUnion; +} + +/** Represents a single run step detail item in a streaming run step's delta payload. */ +export interface RunStepDeltaDetail { + /** the discriminator possible values: message_creation, tool_calls */ + type: string; +} + +/** Represents a message creation within a streaming run step delta. */ +export interface RunStepDeltaMessageCreation extends RunStepDeltaDetail { + /** The object type, which is always "message_creation." */ + type: "message_creation"; + /** The message creation data. */ + messageCreation?: RunStepDeltaMessageCreationObject; +} + +/** Represents the data within a streaming run step message creation response object. */ +export interface RunStepDeltaMessageCreationObject { + /** The ID of the newly-created message. */ + messageId?: string; +} + +/** Represents an invocation of tool calls as part of a streaming run step. */ +export interface RunStepDeltaToolCallObject extends RunStepDeltaDetail { + /** The object type, which is always "tool_calls." */ + type: "tool_calls"; + /** The collection of tool calls for the tool call detail item. */ + toolCalls?: RunStepDeltaToolCallUnion[]; +} + +/** The abstract base representation of a single tool call within a streaming run step's delta tool call details. */ +export interface RunStepDeltaToolCall { + /** The index of the tool call detail in the run step's tool_calls array. */ index: number; + /** The ID of the tool call, used when submitting outputs to the run. */ + id: string; + /** the discriminator possible values: function, file_search, code_interpreter */ + type: string; } -/** Measurement of the amount of tokens used in this request and response. */ -export interface EmbeddingsUsage { - /** Number of tokens sent in the original request. */ - promptTokens: number; - /** Total number of tokens transacted in this request/response. */ - totalTokens: number; +/** Represents a function tool call within a streaming run step's tool call details. */ +export interface RunStepDeltaFunctionToolCall extends RunStepDeltaToolCall { + /** The object type, which is always "function." */ + type: "function"; + /** The function data for the tool call. */ + function?: RunStepDeltaFunction; } -/** - * The OpenAI error class - */ -export class OpenAIError extends RestError { - /** - * The type of the error - */ - public type: string | null; +/** Represents the function data in a streaming run step delta's function tool call. */ +export interface RunStepDeltaFunction { + /** The name of the function. */ + name?: string; + /** The arguments passed to the function as input. */ + arguments?: string; + /** The output of the function, null if outputs have not yet been submitted. */ + output?: string | null; +} + +/** Represents a file search tool call within a streaming run step's tool call details. */ +export interface RunStepDeltaFileSearchToolCall extends RunStepDeltaToolCall { + /** The object type, which is always "file_search." */ + type: "file_search"; + /** Reserved for future use. */ + fileSearch?: Record<string, string>; +} + +/** Represents a Code Interpreter tool call within a streaming run step's tool call details. */ +export interface RunStepDeltaCodeInterpreterToolCall + extends RunStepDeltaToolCall { + /** The object type, which is always "code_interpreter."
*/ + type: "code_interpreter"; + /** The Code Interpreter data for the tool call. */ + codeInterpreter?: RunStepDeltaCodeInterpreterDetailItemObject; +} + +/** Represents the Code Interpreter tool call data in a streaming run step's tool calls. */ +export interface RunStepDeltaCodeInterpreterDetailItemObject { + /** The input into the Code Interpreter tool call. */ + input?: string; /** - * The param meter of the error + * The outputs from the Code Interpreter tool call. Code Interpreter can output one or more + * items, including text (`logs`) or images (`image`). Each of these are represented by a + * different object type. */ - public param: string | null; - constructor( - message: string, - param: string | null = null, - type: string | null = null, - options?: RestErrorOptions, - ) { - super(message, { code: options?.code ?? undefined, ...options }); - this.name = "OpenAIError"; - this.type = type; - this.param = param; - } + outputs?: RunStepDeltaCodeInterpreterOutputUnion[]; +} + +/** The abstract base representation of a streaming run step tool call's Code Interpreter tool output. */ +export interface RunStepDeltaCodeInterpreterOutput { + /** The index of the output in the streaming run step tool call's Code Interpreter outputs array. */ + index: number; + /** the discriminator possible values: logs, image */ + type: string; } -/** Alias for ChatRequestMessageUnion */ -export type ChatRequestMessageUnion = - | ChatRequestSystemMessage - | ChatRequestUserMessage - | ChatRequestAssistantMessage - | ChatRequestToolMessage - | ChatRequestFunctionMessage - | ChatRequestMessage; -/** Alias for ChatMessageContentItemUnion */ -export type ChatMessageContentItemUnion = - | ChatMessageTextContentItem - | ChatMessageImageContentItem - | ChatMessageContentItem; -/** Alias for ChatCompletionsToolCallUnion */ -export type ChatCompletionsToolCallUnion = - | ChatCompletionsFunctionToolCall - | ChatCompletionsToolCall; -/** Alias for AzureChatExtensionConfigurationUnion */ -export type AzureChatExtensionConfigurationUnion = - | AzureSearchChatExtensionConfiguration - | AzureMachineLearningIndexChatExtensionConfiguration - | AzureCosmosDBChatExtensionConfiguration - | ElasticsearchChatExtensionConfiguration - | PineconeChatExtensionConfiguration - | AzureChatExtensionConfiguration; -/** Alias for OnYourDataAuthenticationOptionsUnion */ -export type OnYourDataAuthenticationOptionsUnion = - | OnYourDataApiKeyAuthenticationOptions - | OnYourDataConnectionStringAuthenticationOptions - | OnYourDataKeyAndKeyIdAuthenticationOptions - | OnYourDataEncodedApiKeyAuthenticationOptions - | OnYourDataAccessTokenAuthenticationOptions - | OnYourDataSystemAssignedManagedIdentityAuthenticationOptions - | OnYourDataUserAssignedManagedIdentityAuthenticationOptions - | OnYourDataAuthenticationOptions; -/** Alias for OnYourDataVectorizationSourceUnion */ -export type OnYourDataVectorizationSourceUnion = - | OnYourDataEndpointVectorizationSource - | OnYourDataDeploymentNameVectorizationSource - | OnYourDataModelIdVectorizationSource - | OnYourDataVectorizationSource; -/** Alias for ChatCompletionsResponseFormatUnion */ -export type ChatCompletionsResponseFormatUnion = - | ChatCompletionsTextResponseFormat - | ChatCompletionsJsonResponseFormat - | ChatCompletionsResponseFormat; -/** Alias for ChatCompletionsToolDefinitionUnion */ -export type ChatCompletionsToolDefinitionUnion = - | ChatCompletionsFunctionToolDefinition - | ChatCompletionsToolDefinition; -/** Alias for ChatCompletionsNamedToolSelectionUnion */ -export type 
ChatCompletionsNamedToolSelectionUnion = - | ChatCompletionsNamedFunctionToolSelection - | ChatCompletionsToolSelectionPreset - | ChatCompletionsNamedToolSelection; -/** Alias for ChatFinishDetailsUnion */ -export type ChatFinishDetailsUnion = StopFinishDetails | MaxTokensFinishDetails | ChatFinishDetails; -/** A readable stream that is iterable and disposable. */ -export interface EventStream<T> extends ReadableStream<T>, AsyncIterable<T> {} +/** Represents a log output as produced by the Code Interpreter tool and as represented in a streaming run step's delta tool calls collection. */ +export interface RunStepDeltaCodeInterpreterLogOutput + extends RunStepDeltaCodeInterpreterOutput { + /** The type of the object, which is always "logs." */ + type: "logs"; + /** The text output from the Code Interpreter tool call. */ + logs?: string; +} + +/** Represents an image output as produced by the Code Interpreter tool and as represented in a streaming run step's delta tool calls collection. */ +export interface RunStepDeltaCodeInterpreterImageOutput + extends RunStepDeltaCodeInterpreterOutput { + /** The object type, which is always "image." */ + type: "image"; + /** The image data for the Code Interpreter tool call output. */ + image?: RunStepDeltaCodeInterpreterImageOutputObject; +} + +/** Represents the data for a streaming run step's Code Interpreter tool call image output. */ +export interface RunStepDeltaCodeInterpreterImageOutputObject { + /** The file ID for the image. */ + fileId?: string; +} + +/** Thread operation related streaming events */ +export type ThreadStreamEvent = "thread.created"; +/** Run operation related streaming events */ +export type RunStreamEvent = + | "thread.run.created" + | "thread.run.queued" + | "thread.run.in_progress" + | "thread.run.requires_action" + | "thread.run.completed" + | "thread.run.failed" + | "thread.run.cancelling" + | "thread.run.cancelled" + | "thread.run.expired"; +/** Run step operation related streaming events */ +export type RunStepStreamEvent = + | "thread.run.step.created" + | "thread.run.step.in_progress" + | "thread.run.step.delta" + | "thread.run.step.completed" + | "thread.run.step.failed" + | "thread.run.step.cancelled" + | "thread.run.step.expired"; +/** Message operation related streaming events */ +export type MessageStreamEvent = + | "thread.message.created" + | "thread.message.in_progress" + | "thread.message.delta" + | "thread.message.completed" + | "thread.message.incomplete"; +/** Terminal event indicating a server side error while streaming. */ +export type ErrorEvent = "error"; +/** Terminal event indicating the successful end of a stream. */ +export type DoneEvent = "done";
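For orientation, a sketch of dispatching on these event-name unions; the envelope that carries the event name off the wire is assumed and not part of this module:

type AssistantStreamEventName =
  | ThreadStreamEvent
  | RunStreamEvent
  | RunStepStreamEvent
  | MessageStreamEvent
  | ErrorEvent
  | DoneEvent;

// Map a received event name to a short description; unhandled names fall through.
function describeEvent(event: AssistantStreamEventName): string {
  switch (event) {
    case "thread.message.delta":
      return "partial message content arrived";
    case "thread.run.completed":
      return "run finished successfully";
    case "error":
      return "server-side error while streaming";
    case "done":
      return "stream ended";
    default:
      return `unhandled event: ${event}`;
  }
}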
+/** The known set of supported API versions. */ +export type ServiceApiVersions = "2024-02-15-preview" | "2024-05-01-preview"; +/** Alias for ToolDefinitionUnion */ +export type ToolDefinitionUnion = + | CodeInterpreterToolDefinition + | FileSearchToolDefinition + | FunctionToolDefinition + | ToolDefinition; +/** Alias for CreateFileSearchToolResourceOptions */ +export type CreateFileSearchToolResourceOptions = + | string[] + | CreateFileSearchToolResourceVectorStoreOptions[]; +/** Alias for MessageAttachmentToolDefinition */ +export type MessageAttachmentToolDefinition = + | CodeInterpreterToolDefinition + | FileSearchToolDefinition; +/** Alias for MessageContentUnion */ +export type MessageContentUnion = + | MessageTextContent + | MessageImageFileContent + | MessageContent; +/** Alias for MessageTextAnnotationUnion */ +export type MessageTextAnnotationUnion = + | MessageTextFileCitationAnnotation + | MessageTextFilePathAnnotation + | MessageTextAnnotation; +/** Alias for RequiredActionUnion */ +export type RequiredActionUnion = SubmitToolOutputsAction | RequiredAction; +/** Alias for RequiredToolCallUnion */ +export type RequiredToolCallUnion = RequiredFunctionToolCall | RequiredToolCall; +/** Alias for RunStepDetailsUnion */ +export type RunStepDetailsUnion = + | RunStepMessageCreationDetails + | RunStepToolCallDetails + | RunStepDetails; +/** Alias for RunStepToolCallUnion */ +export type RunStepToolCallUnion = + | RunStepCodeInterpreterToolCall + | RunStepFileSearchToolCall + | RunStepFunctionToolCall + | RunStepToolCall; +/** Alias for RunStepCodeInterpreterToolCallOutputUnion */ +export type RunStepCodeInterpreterToolCallOutputUnion = + | RunStepCodeInterpreterLogOutput + | RunStepCodeInterpreterImageOutput + | RunStepCodeInterpreterToolCallOutput; +/** Alias for MessageDeltaContentUnion */ +export type MessageDeltaContentUnion = + | MessageDeltaImageFileContent + | MessageDeltaTextContentObject + | MessageDeltaContent; +/** Alias for MessageDeltaTextAnnotationUnion */ +export type MessageDeltaTextAnnotationUnion = + | MessageDeltaTextFileCitationAnnotationObject + | MessageDeltaTextFilePathAnnotationObject + | MessageDeltaTextAnnotation; +/** Alias for RunStepDeltaDetailUnion */ +export type RunStepDeltaDetailUnion = + | RunStepDeltaMessageCreation + | RunStepDeltaToolCallObject + | RunStepDeltaDetail; +/** Alias for RunStepDeltaToolCallUnion */ +export type RunStepDeltaToolCallUnion = + | RunStepDeltaFunctionToolCall + | RunStepDeltaFileSearchToolCall + | RunStepDeltaCodeInterpreterToolCall + | RunStepDeltaToolCall; +/** Alias for RunStepDeltaCodeInterpreterOutputUnion */ +export type RunStepDeltaCodeInterpreterOutputUnion = + | RunStepDeltaCodeInterpreterLogOutput + | RunStepDeltaCodeInterpreterImageOutput + | RunStepDeltaCodeInterpreterOutput;
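The options.ts changes below replace the old completions/images option bags with per-operation optional-parameter interfaces built around cursor pagination (limit, order, after, before). As a sketch of the draining pattern those cursors imply, against the pageable list shapes defined above; the client type and method name here are hypothetical and may differ from the generated surface:

// Collect every run step for a run by following the `after` cursor.
async function listAllRunSteps(
  client: AssistantsClient, // hypothetical client type
  threadId: string,
  runId: string,
): Promise<RunStep[]> {
  const steps: RunStep[] = [];
  let after: string | undefined;
  let hasMore = true;
  while (hasMore) {
    const page: OpenAIPageableListOfRunStep = await client.listRunSteps(
      threadId,
      runId,
      { limit: 100, order: "asc", after },
    );
    steps.push(...page.data);
    hasMore = page.hasMore;
    after = page.lastId; // resume after the last item of this page
  }
  return steps;
}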
from "./models.js"; -export interface GeneratedGetChatCompletionsOptions extends OperationOptions {} - -export interface GetImageGenerationsOptions extends OperationOptions {} - -/** Represents the request data used to generate images. */ -export interface GetImagesOptions extends OperationOptions { - /** - * The number of images to generate. - * Dall-e-3 models only support a value of 1. - */ - n?: number; - /** - * The desired dimensions for generated images. - * Dall-e-3 models support 1024x1024, 1792x1024, or 1024x1792. - */ - size?: ImageSize; - /** The format in which image generation response items should be presented. */ - responseFormat?: ImageGenerationResponseFormat; - /** - * The desired image generation quality level to use. - */ - quality?: ImageGenerationQuality; - /** - * The desired image generation style to use. - */ - style?: ImageGenerationStyle; - /** A unique identifier representing your end-user, which can help to monitor and detect abuse. */ - user?: string; +/** Optional parameters. */ +export interface CreateAssistantOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface ListAssistantsOptionalParams extends OperationOptions { + /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */ + limit?: number; + /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */ + order?: ListSortOrder; + /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */ + after?: string; + /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */ + before?: string; +} + +/** Optional parameters. */ +export interface GetAssistantOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface UpdateAssistantOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface DeleteAssistantOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface CreateThreadOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface GetThreadOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface UpdateThreadOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface DeleteThreadOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface CreateMessageOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface ListMessagesOptionalParams extends OperationOptions { + /** Filter messages by the run ID that generated them. */ + runId?: string; + /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */ + limit?: number; + /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */ + order?: ListSortOrder; + /** A cursor for use in pagination. after is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */ + after?: string; + /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */ + before?: string; +} + +/** Optional parameters. */ +export interface GetMessageOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface UpdateMessageOptionalParams extends OperationOptions { + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record<string, string>; +} + +/** Optional parameters. */ +export interface CreateRunOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface ListRunsOptionalParams extends OperationOptions { + /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */ + limit?: number; + /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */ + order?: ListSortOrder; + /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */ + after?: string; + /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */ + before?: string; +} + +/** Optional parameters. */ +export interface GetRunOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface UpdateRunOptionalParams extends OperationOptions { + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record<string, string>; +} + +/** Optional parameters. */ +export interface SubmitToolOutputsToRunOptionalParams extends OperationOptions { + /** If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. */ + stream?: boolean | null; } -/** Options for to custom embeddings request */ -export interface GetEmbeddingsOptions extends OperationOptions { - /** - * An identifier for the caller or end user of the operation. This may be used for tracking - * or rate-limiting purposes. - */ - user?: string; - /** - * The model name to provide as part of this embeddings request. - * Not applicable to Azure OpenAI, where deployment information should be included in the Azure - * resource URI that's connected to. - */ - model?: string; - /** The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. */ - dimensions?: number; +/** Optional parameters.
*/ +export interface CancelRunOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface CreateThreadAndRunOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface GetRunStepOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface ListRunStepsOptionalParams extends OperationOptions { + /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */ + limit?: number; + /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */ + order?: ListSortOrder; + /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */ + after?: string; + /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */ + before?: string; +} + +/** Optional parameters. */ +export interface ListFilesOptionalParams extends OperationOptions { + /** A value that, when provided, limits list results to files matching the corresponding purpose. */ + purpose?: FilePurpose; +} + +/** Optional parameters. */ +export interface UploadFileOptionalParams extends OperationOptions { + /** The 'content-type' header value, always 'multipart/form-data' for this operation. */ + contentType?: string; + /** A filename to associate with the uploaded data. */ + filename?: string; +} + +/** Optional parameters. */ +export interface DeleteFileOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface GetFileOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface GetFileContentOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface ListVectorStoresOptionalParams extends OperationOptions { + /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */ + limit?: number; + /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */ + order?: ListSortOrder; + /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */ + after?: string; + /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */ + before?: string; } -/** - * The configuration information for a completions request. - * Completions support a wide variety of tasks and generate text that continues from or "completes" - * provided prompt data. - */ -export interface GetCompletionsOptions extends OperationOptions { - /** The maximum number of tokens to generate. */ - maxTokens?: number; - /** - * The sampling temperature to use that controls the apparent creativity of generated completions.
- * Higher values will make output more random while lower values will make results more focused - * and deterministic. - * It is not recommended to modify temperature and top_p for the same completions request as the - * interaction of these two settings is difficult to predict. - */ - temperature?: number; - /** - * An alternative to sampling with temperature called nucleus sampling. This value causes the - * model to consider the results of tokens with the provided probability mass. As an example, a - * value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be - * considered. - * It is not recommended to modify temperature and top_p for the same completions request as the - * interaction of these two settings is difficult to predict. - */ - topP?: number; - /** - * A map between GPT token IDs and bias scores that influences the probability of specific tokens - * appearing in a completions response. Token IDs are computed via external tokenizer tools, while - * bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to - * a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias - * score varies by model. - */ - logitBias?: Record; - /** - * An identifier for the caller or end user of the operation. This may be used for tracking - * or rate-limiting purposes. - */ - user?: string; - /** - * The number of completions choices that should be generated per provided prompt as part of an - * overall completions response. - * Because this setting can generate many completions, it may quickly consume your token quota. - * Use carefully and ensure reasonable settings for max_tokens and stop. - */ - n?: number; - /** - * A value that controls the emission of log probabilities for the provided number of most likely - * tokens within a completions response. - */ - logprobs?: number; - /** - * A value specifying whether completions responses should include input prompts as prefixes to - * their generated output. - */ - echo?: boolean; - /** A collection of textual sequences that will end completions generation. */ - stop?: string[]; - /** - * A value that influences the probability of generated tokens appearing based on their existing - * presence in generated text. - * Positive values will make tokens less likely to appear when they already exist and increase the - * model's likelihood to output new topics. - */ - presencePenalty?: number; - /** - * A value that influences the probability of generated tokens appearing based on their cumulative - * frequency in generated text. - * Positive values will make tokens less likely to appear as their frequency increases and - * decrease the likelihood of the model repeating the same statements verbatim. - */ - frequencyPenalty?: number; - /** - * A value that controls how many completions will be internally generated prior to response - * formulation. - * When used together with n, best_of controls the number of candidate completions and must be - * greater than n. - * Because this setting can generate many completions, it may quickly consume your token quota. - * Use carefully and ensure reasonable settings for max_tokens and stop. - */ - bestOf?: number; +/** Optional parameters. */ +export interface CreateVectorStoreOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface GetVectorStoreOptionalParams extends OperationOptions {} + +/** Optional parameters. 
*/ +export interface ModifyVectorStoreOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface DeleteVectorStoreOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface ListVectorStoreFilesOptionalParams extends OperationOptions { + /** Filter by file status. */ + filter?: VectorStoreFileStatusFilter; + /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */ + limit?: number; + /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */ + order?: ListSortOrder; + /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */ + after?: string; + /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */ + before?: string; } -/** - * This module contains models that we want to live side-by-side with the - * corresponding generated models. This is useful for providing customer-facing - * models that have different names/types than the generated models. - */ - -export interface GetChatCompletionsOptions extends OperationOptions { - /** A list of functions the model may generate JSON inputs for. */ - functions?: FunctionDefinition[]; - /** - * Controls how the model responds to function calls. "none" means the model does not call a function, - * and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. - * Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. - * "none" is the default when no functions are present. "auto" is the default if functions are present. - */ - functionCall?: FunctionCallPreset | FunctionName; - /** The maximum number of tokens to generate. */ - maxTokens?: number; - /** - * The sampling temperature to use that controls the apparent creativity of generated completions. - * Higher values will make output more random while lower values will make results more focused - * and deterministic. - * It is not recommended to modify temperature and topP for the same completions request as the - * interaction of these two settings is difficult to predict. - */ - temperature?: number; - /** - * An alternative to sampling with temperature called nucleus sampling. This value causes the - * model to consider the results of tokens with the provided probability mass. As an example, a - * value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be - * considered. - * It is not recommended to modify temperature and topP for the same completions request as the - * interaction of these two settings is difficult to predict. - */ - topP?: number; - /** - * A map between GPT token IDs and bias scores that influences the probability of specific tokens - * appearing in a completions response. Token IDs are computed via external tokenizer tools, while - * bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to - * a full ban or exclusive selection of a token, respectively. 
The exact behavior of a given bias - * score varies by model. - */ - logitBias?: Record; - /** - * An identifier for the caller or end user of the operation. This may be used for tracking - * or rate-limiting purposes. - */ - user?: string; - /** - * The number of chat completions choices that should be generated for a chat completions - * response. - * Because this setting can generate many completions, it may quickly consume your token quota. - * Use carefully and ensure reasonable settings for maxTokens and stop. - */ - n?: number; - /** A collection of textual sequences that will end completions generation. */ - stop?: string[]; - /** - * A value that influences the probability of generated tokens appearing based on their existing - * presence in generated text. - * Positive values will make tokens less likely to appear when they already exist and increase the - * model's likelihood to output new topics. - */ - presencePenalty?: number; - /** - * A value that influences the probability of generated tokens appearing based on their cumulative - * frequency in generated text. - * Positive values will make tokens less likely to appear as their frequency increases and - * decrease the likelihood of the model repeating the same statements verbatim. - */ - frequencyPenalty?: number; - /** - * If specified, the system will make a best effort to sample deterministically such that repeated requests with the - * same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the - * system_fingerprint response parameter to monitor changes in the backend." - */ - seed?: number; - /** An object specifying the format that the model must output. Used to enable JSON mode. */ - responseFormat?: ChatCompletionsResponseFormat; - /** The available tool definitions that the chat completions request can use, including caller-defined functions. */ - tools?: ChatCompletionsToolDefinitionUnion[]; - /** If specified, the model will configure which of the provided tools it can use for the chat completions response. */ - toolChoice?: ChatCompletionsNamedToolSelectionUnion; - /** - * The configuration entries for Azure OpenAI chat extensions that use them. - * This additional specification is only compatible with Azure OpenAI. - */ - azureExtensionOptions?: AzureExtensionsOptions; +/** Optional parameters. */ +export interface CreateVectorStoreFileOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface GetVectorStoreFileOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface DeleteVectorStoreFileOptionalParams extends OperationOptions {} + +/** Optional parameters. */ +export interface CreateVectorStoreFileBatchOptionalParams + extends OperationOptions {} + +/** Optional parameters. */ +export interface GetVectorStoreFileBatchOptionalParams + extends OperationOptions {} + +/** Optional parameters. */ +export interface CancelVectorStoreFileBatchOptionalParams + extends OperationOptions {} + +/** Optional parameters. */ +export interface ListVectorStoreFileBatchFilesOptionalParams + extends OperationOptions { + /** Filter by file status. */ + filter?: VectorStoreFileStatusFilter; + /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */ + limit?: number; + /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */ + order?: ListSortOrder; + /** A cursor for use in pagination. 
after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */ + after?: string; + /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */ + before?: string; } diff --git a/sdk/openai/openai/src/rest/assistantsClient.ts b/sdk/openai/openai/src/rest/assistantsClient.ts new file mode 100644 index 000000000000..2db46cba3de0 --- /dev/null +++ b/sdk/openai/openai/src/rest/assistantsClient.ts @@ -0,0 +1,58 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { getClient, ClientOptions } from "@azure-rest/core-client"; +import { logger } from "../logger.js"; +import { TokenCredential, KeyCredential } from "@azure/core-auth"; +import { AssistantsContext } from "./clientDefinitions.js"; + +/** The optional parameters for the client */ +export interface AssistantsContextOptions extends ClientOptions {} + +/** + * Initialize a new instance of `AssistantsContext` + * @param endpointParam - An OpenAI endpoint supporting assistants functionality. + * @param credentials - the credential used to authenticate the client + * @param options - optional parameters for configuring the client + */ +export default function createClient( + endpointParam: string, + credentials: TokenCredential | KeyCredential, + options: AssistantsContextOptions = {}, +): AssistantsContext { + const endpointUrl = options.endpoint ?? options.baseUrl ?? `${endpointParam}`; + const userAgentInfo = `azsdk-js-openai-assistants-rest/1.0.0-beta.1`; + const userAgentPrefix = + options.userAgentOptions && options.userAgentOptions.userAgentPrefix + ? `${options.userAgentOptions.userAgentPrefix} ${userAgentInfo}` + : `${userAgentInfo}`; + options = { + ...options, + userAgentOptions: { + userAgentPrefix, + }, + loggingOptions: { + logger: options.loggingOptions?.logger ?? logger.info, + }, + credentials: { + scopes: options.credentials?.scopes ?? [ + "https://cognitiveservices.azure.com/.default", + ], + apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key", + }, + }; + const client = getClient( + endpointUrl, + credentials, + options, + ) as AssistantsContext; + + client.pipeline.removePolicy({ name: "ApiVersionPolicy" }); + if (options.apiVersion) { + logger.warning( + "This client does not support client api-version, please change it at the operation level", + ); + } + + return client; +}
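
For orientation, the sketch below shows how this factory pairs with the path-based routes declared in clientDefinitions.ts further down. The endpoint, API key, and model name are placeholders, and the request-body shape is only assumed from the surrounding models; note that status checking is done by hand here because this change also removes the `isUnexpected` helper from the REST exports.

```ts
import { AzureKeyCredential } from "@azure/core-auth";
import createClient from "./assistantsClient.js";

async function main(): Promise<void> {
  // Placeholders: substitute your own resource endpoint and key.
  const client = createClient(
    "https://<resource-name>.openai.azure.com",
    new AzureKeyCredential("<api-key>"),
  );

  // Requests go through the `Routes` call signatures: pick a path,
  // then invoke one of the verbs that path supports.
  const response = await client.path("/assistants").post({
    body: { model: "<model-or-deployment-name>", name: "example-assistant" },
  });

  // isUnexpected is removed in this change, so check the status explicitly.
  if (response.status !== "200") {
    throw new Error(`Creating the assistant failed with status ${response.status}`);
  }
  console.log("Created assistant:", response.body);
}

main().catch(console.error);
```
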
diff --git a/sdk/openai/openai/src/rest/clientDefinitions.ts b/sdk/openai/openai/src/rest/clientDefinitions.ts index 464aa4164034..9a64a1d164cd 100644 --- a/sdk/openai/openai/src/rest/clientDefinitions.ts +++ b/sdk/openai/openai/src/rest/clientDefinitions.ts @@ -2,133 +2,405 @@ // Licensed under the MIT license. import { - GetAudioTranscriptionAsPlainTextParameters, - GetAudioTranscriptionAsResponseObjectParameters, - GetAudioTranslationAsPlainTextParameters, - GetAudioTranslationAsResponseObjectParameters, - GetCompletionsParameters, - GetChatCompletionsParameters, - GetImageGenerationsParameters, - GetEmbeddingsParameters, + CreateAssistantParameters, + ListAssistantsParameters, + GetAssistantParameters, + UpdateAssistantParameters, + DeleteAssistantParameters, + CreateThreadParameters, + GetThreadParameters, + UpdateThreadParameters, + DeleteThreadParameters, + CreateMessageParameters, + ListMessagesParameters, + GetMessageParameters, + UpdateMessageParameters, + CreateRunParameters, + ListRunsParameters, + GetRunParameters, + UpdateRunParameters, + SubmitToolOutputsToRunParameters, + CancelRunParameters, + CreateThreadAndRunParameters, + GetRunStepParameters, + ListRunStepsParameters, + ListFilesParameters, + UploadFileParameters, + DeleteFileParameters, + GetFileParameters, + GetFileContentParameters, + ListVectorStoresParameters, + CreateVectorStoreParameters, + GetVectorStoreParameters, + ModifyVectorStoreParameters, + DeleteVectorStoreParameters, + ListVectorStoreFilesParameters, + CreateVectorStoreFileParameters, + GetVectorStoreFileParameters, + DeleteVectorStoreFileParameters, + CreateVectorStoreFileBatchParameters, + GetVectorStoreFileBatchParameters, + CancelVectorStoreFileBatchParameters, + ListVectorStoreFileBatchFilesParameters, } from "./parameters.js"; import { - GetAudioTranscriptionAsPlainText200Response, - GetAudioTranscriptionAsPlainTextDefaultResponse, - GetAudioTranscriptionAsResponseObject200Response, - GetAudioTranscriptionAsResponseObjectDefaultResponse, - GetAudioTranslationAsPlainText200Response, - GetAudioTranslationAsPlainTextDefaultResponse, - GetAudioTranslationAsResponseObject200Response, - GetAudioTranslationAsResponseObjectDefaultResponse, - GetCompletions200Response, - GetCompletionsDefaultResponse, - GetChatCompletions200Response, - GetChatCompletionsDefaultResponse, - GetImageGenerations200Response, - GetImageGenerationsDefaultResponse, - GetEmbeddings200Response, - GetEmbeddingsDefaultResponse, + CreateAssistant200Response, + ListAssistants200Response, + GetAssistant200Response, + UpdateAssistant200Response, + DeleteAssistant200Response, + CreateThread200Response, + GetThread200Response, + UpdateThread200Response, + DeleteThread200Response, + CreateMessage200Response, + ListMessages200Response, + GetMessage200Response, + UpdateMessage200Response, + CreateRun200Response, + ListRuns200Response, + GetRun200Response, + UpdateRun200Response, + SubmitToolOutputsToRun200Response, + CancelRun200Response, + CreateThreadAndRun200Response, + GetRunStep200Response, + ListRunSteps200Response, + ListFiles200Response, + UploadFile200Response, + DeleteFile200Response, + GetFile200Response, + GetFileContent200Response, + ListVectorStores200Response, + CreateVectorStore200Response, + GetVectorStore200Response, + ModifyVectorStore200Response, + DeleteVectorStore200Response, + ListVectorStoreFiles200Response, + CreateVectorStoreFile200Response, + GetVectorStoreFile200Response, + DeleteVectorStoreFile200Response, + CreateVectorStoreFileBatch200Response, + GetVectorStoreFileBatch200Response, + CancelVectorStoreFileBatch200Response, + ListVectorStoreFileBatchFiles200Response, } from "./responses.js"; import { Client, StreamableMethod } from "@azure-rest/core-client"; -export interface GetAudioTranscriptionAsPlainText { - /** - * Gets transcribed text and associated metadata from 
provided spoken audio data. Audio will be transcribed in the - * written language corresponding to the language it was spoken in. - */ +export interface CreateAssistant { + /** Creates a new assistant. */ post( - options: GetAudioTranscriptionAsPlainTextParameters, - ): StreamableMethod< - GetAudioTranscriptionAsPlainText200Response | GetAudioTranscriptionAsPlainTextDefaultResponse - >; - /** - * Gets transcribed text and associated metadata from provided spoken audio data. Audio will be transcribed in the - * written language corresponding to the language it was spoken in. - */ + options: CreateAssistantParameters, + ): StreamableMethod; + /** Gets a list of assistants that were previously created. */ + get( + options?: ListAssistantsParameters, + ): StreamableMethod; +} + +export interface GetAssistant { + /** Retrieves an existing assistant. */ + get( + options?: GetAssistantParameters, + ): StreamableMethod; + /** Modifies an existing assistant. */ post( - options: GetAudioTranscriptionAsResponseObjectParameters, - ): StreamableMethod< - | GetAudioTranscriptionAsResponseObject200Response - | GetAudioTranscriptionAsResponseObjectDefaultResponse - >; + options: UpdateAssistantParameters, + ): StreamableMethod; + /** Deletes an assistant. */ + delete( + options?: DeleteAssistantParameters, + ): StreamableMethod; } -export interface GetAudioTranslationAsPlainText { - /** Gets English language transcribed text and associated metadata from provided spoken audio data. */ +export interface CreateThread { + /** Creates a new thread. Threads contain messages and can be run by assistants. */ post( - options: GetAudioTranslationAsPlainTextParameters, - ): StreamableMethod< - GetAudioTranslationAsPlainText200Response | GetAudioTranslationAsPlainTextDefaultResponse - >; - /** Gets English language transcribed text and associated metadata from provided spoken audio data. */ + options: CreateThreadParameters, + ): StreamableMethod; +} + +export interface GetThread { + /** Gets information about an existing thread. */ + get(options?: GetThreadParameters): StreamableMethod; + /** Modifies an existing thread. */ post( - options: GetAudioTranslationAsResponseObjectParameters, - ): StreamableMethod< - | GetAudioTranslationAsResponseObject200Response - | GetAudioTranslationAsResponseObjectDefaultResponse - >; + options: UpdateThreadParameters, + ): StreamableMethod; + /** Deletes an existing thread. */ + delete( + options?: DeleteThreadParameters, + ): StreamableMethod; } -export interface GetCompletions { - /** - * Gets completions for the provided input prompts. - * Completions support a wide variety of tasks and generate text that continues from or "completes" - * provided prompt data. - */ +export interface CreateMessage { + /** Creates a new message on a specified thread. */ + post( + options: CreateMessageParameters, + ): StreamableMethod; + /** Gets a list of messages that exist on a thread. */ + get( + options?: ListMessagesParameters, + ): StreamableMethod; +} + +export interface GetMessage { + /** Gets an existing message from an existing thread. */ + get(options?: GetMessageParameters): StreamableMethod; + /** Modifies an existing message on an existing thread. */ + post( + options?: UpdateMessageParameters, + ): StreamableMethod; +} + +export interface CreateRun { + /** Creates a new run for an assistant thread. */ + post(options: CreateRunParameters): StreamableMethod; + /** Gets a list of runs for a specified thread. 
*/ + get(options?: ListRunsParameters): StreamableMethod; +} + +export interface GetRun { + /** Gets an existing run from an existing thread. */ + get(options?: GetRunParameters): StreamableMethod; + /** Modifies an existing thread run. */ + post(options?: UpdateRunParameters): StreamableMethod; +} + +export interface SubmitToolOutputsToRun { + /** Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. */ post( - options?: GetCompletionsParameters, - ): StreamableMethod; + options?: SubmitToolOutputsToRunParameters, + ): StreamableMethod; +} + +export interface CancelRun { + /** Cancels an in-progress run on a thread. */ + post(options?: CancelRunParameters): StreamableMethod; } -export interface GetChatCompletions { +export interface CreateThreadAndRun { + /** Creates a new assistant thread and immediately starts a run using that new thread. */ + post( + options: CreateThreadAndRunParameters, + ): StreamableMethod; +} + +export interface GetRunStep { + /** Gets a single run step from a thread run. */ + get(options?: GetRunStepParameters): StreamableMethod; +} + +export interface ListRunSteps { + /** Gets a list of run steps from a thread run. */ + get( + options?: ListRunStepsParameters, + ): StreamableMethod; +} + +export interface ListFiles { + /** Gets a list of previously uploaded files. */ + get(options?: ListFilesParameters): StreamableMethod; + /** Uploads a file for use by other operations. */ + post(options: UploadFileParameters): StreamableMethod; +} + +export interface DeleteFile { + /** Delete a previously uploaded file. */ + delete( + options?: DeleteFileParameters, + ): StreamableMethod; + /** Returns information about a specific file. Does not retrieve file content. */ + get(options?: GetFileParameters): StreamableMethod; +} + +export interface GetFileContent { + /** Retrieves the content of a previously uploaded file. */ + get( + options?: GetFileContentParameters, + ): StreamableMethod; +} + +export interface ListVectorStores { + /** Returns a list of vector stores. */ + get( + options?: ListVectorStoresParameters, + ): StreamableMethod; + /** Creates a vector store. */ + post( + options: CreateVectorStoreParameters, + ): StreamableMethod; +} + +export interface GetVectorStore { + /** Returns the vector store object matching the specified ID. */ + get( + options?: GetVectorStoreParameters, + ): StreamableMethod; + /** Modifies the vector store object matching the specified ID. */ + post( + options: ModifyVectorStoreParameters, + ): StreamableMethod; + /** Deletes the vector store object matching the specified ID. */ + delete( + options?: DeleteVectorStoreParameters, + ): StreamableMethod; +} + +export interface ListVectorStoreFiles { + /** Returns a list of vector store files. */ + get( + options?: ListVectorStoreFilesParameters, + ): StreamableMethod; + /** Create a vector store file by attaching a file to a vector store. */ + post( + options?: CreateVectorStoreFileParameters, + ): StreamableMethod; +} + +export interface GetVectorStoreFile { + /** Retrieves a vector store file. */ + get( + options?: GetVectorStoreFileParameters, + ): StreamableMethod; /** - * Gets chat completions for the provided chat messages. - * Completions support a wide variety of tasks and generate text that continues from or "completes" - * provided prompt data. + * Delete a vector store file. 
This will remove the file from the vector store but the file itself will not be deleted. + * To delete the file, use the delete file endpoint. */ - post( - options?: GetChatCompletionsParameters, - ): StreamableMethod; + delete( + options?: DeleteVectorStoreFileParameters, + ): StreamableMethod; } -export interface GetImageGenerations { - /** Creates an image given a prompt. */ +export interface CreateVectorStoreFileBatch { + /** Create a vector store file batch. */ post( - options?: GetImageGenerationsParameters, - ): StreamableMethod; + options?: CreateVectorStoreFileBatchParameters, + ): StreamableMethod; } -export interface GetEmbeddings { - /** Return the embeddings for a given prompt. */ +export interface GetVectorStoreFileBatch { + /** Retrieve a vector store file batch. */ + get( + options?: GetVectorStoreFileBatchParameters, + ): StreamableMethod; +} + +export interface CancelVectorStoreFileBatch { + /** Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible. */ post( - options?: GetEmbeddingsParameters, - ): StreamableMethod; + options?: CancelVectorStoreFileBatchParameters, + ): StreamableMethod; +} + +export interface ListVectorStoreFileBatchFiles { + /** Returns a list of vector store files in a batch. */ + get( + options?: ListVectorStoreFileBatchFilesParameters, + ): StreamableMethod; } export interface Routes { - /** Resource for '/deployments/\{deploymentId\}/audio/transcriptions' has methods for the following verbs: post */ + /** Resource for '/assistants' has methods for the following verbs: post, get */ + (path: "/assistants"): CreateAssistant; + /** Resource for '/assistants/\{assistantId\}' has methods for the following verbs: get, post, delete */ + (path: "/assistants/{assistantId}", assistantId: string): GetAssistant; + /** Resource for '/threads' has methods for the following verbs: post */ + (path: "/threads"): CreateThread; + /** Resource for '/threads/\{threadId\}' has methods for the following verbs: get, post, delete */ + (path: "/threads/{threadId}", threadId: string): GetThread; + /** Resource for '/threads/\{threadId\}/messages' has methods for the following verbs: post, get */ + (path: "/threads/{threadId}/messages", threadId: string): CreateMessage; + /** Resource for '/threads/\{threadId\}/messages/\{messageId\}' has methods for the following verbs: get, post */ + ( + path: "/threads/{threadId}/messages/{messageId}", + threadId: string, + messageId: string, + ): GetMessage; + /** Resource for '/threads/\{threadId\}/runs' has methods for the following verbs: post, get */ + (path: "/threads/{threadId}/runs", threadId: string): CreateRun; + /** Resource for '/threads/\{threadId\}/runs/\{runId\}' has methods for the following verbs: get, post */ + ( + path: "/threads/{threadId}/runs/{runId}", + threadId: string, + runId: string, + ): GetRun; + /** Resource for '/threads/\{threadId\}/runs/\{runId\}/submit_tool_outputs' has methods for the following verbs: post */ + ( + path: "/threads/{threadId}/runs/{runId}/submit_tool_outputs", + threadId: string, + runId: string, + ): SubmitToolOutputsToRun; + /** Resource for '/threads/\{threadId\}/runs/\{runId\}/cancel' has methods for the following verbs: post */ + ( + path: "/threads/{threadId}/runs/{runId}/cancel", + threadId: string, + runId: string, + ): CancelRun; + /** Resource for '/threads/runs' has methods for the following verbs: post */ + (path: "/threads/runs"): CreateThreadAndRun; + /** Resource for 
'/threads/\{threadId\}/runs/\{runId\}/steps/\{stepId\}' has methods for the following verbs: get */ + ( + path: "/threads/{threadId}/runs/{runId}/steps/{stepId}", + threadId: string, + runId: string, + stepId: string, + ): GetRunStep; + /** Resource for '/threads/\{threadId\}/runs/\{runId\}/steps' has methods for the following verbs: get */ + ( + path: "/threads/{threadId}/runs/{runId}/steps", + threadId: string, + runId: string, + ): ListRunSteps; + /** Resource for '/files' has methods for the following verbs: get, post */ + (path: "/files"): ListFiles; + /** Resource for '/files/\{fileId\}' has methods for the following verbs: delete, get */ + (path: "/files/{fileId}", fileId: string): DeleteFile; + /** Resource for '/files/\{fileId\}/content' has methods for the following verbs: get */ + (path: "/files/{fileId}/content", fileId: string): GetFileContent; + /** Resource for '/vector_stores' has methods for the following verbs: get, post */ + (path: "/vector_stores"): ListVectorStores; + /** Resource for '/vector_stores/\{vectorStoreId\}' has methods for the following verbs: get, post, delete */ + ( + path: "/vector_stores/{vectorStoreId}", + vectorStoreId: string, + ): GetVectorStore; + /** Resource for '/vector_stores/\{vectorStoreId\}/files' has methods for the following verbs: get, post */ + ( + path: "/vector_stores/{vectorStoreId}/files", + vectorStoreId: string, + ): ListVectorStoreFiles; + /** Resource for '/vector_stores/\{vectorStoreId\}/files/\{fileId\}' has methods for the following verbs: get, delete */ + ( + path: "/vector_stores/{vectorStoreId}/files/{fileId}", + vectorStoreId: string, + fileId: string, + ): GetVectorStoreFile; + /** Resource for '/vector_stores/\{vectorStoreId\}/file_batches' has methods for the following verbs: post */ + ( + path: "/vector_stores/{vectorStoreId}/file_batches", + vectorStoreId: string, + ): CreateVectorStoreFileBatch; + /** Resource for '/vector_stores/\{vectorStoreId\}/file_batches/\{batchId\}' has methods for the following verbs: get */ ( - path: "/deployments/{deploymentId}/audio/transcriptions", - deploymentId: string, - ): GetAudioTranscriptionAsPlainText; - /** Resource for '/deployments/\{deploymentId\}/audio/translations' has methods for the following verbs: post */ + path: "/vector_stores/{vectorStoreId}/file_batches/{batchId}", + vectorStoreId: string, + batchId: string, + ): GetVectorStoreFileBatch; + /** Resource for '/vector_stores/\{vectorStoreId\}/file_batches/\{batchId\}/cancel' has methods for the following verbs: post */ ( - path: "/deployments/{deploymentId}/audio/translations", - deploymentId: string, - ): GetAudioTranslationAsPlainText; - /** Resource for '/deployments/\{deploymentId\}/completions' has methods for the following verbs: post */ - (path: "/deployments/{deploymentId}/completions", deploymentId: string): GetCompletions; - /** Resource for '/deployments/\{deploymentId\}/chat/completions' has methods for the following verbs: post */ - (path: "/deployments/{deploymentId}/chat/completions", deploymentId: string): GetChatCompletions; - /** Resource for '/deployments/\{deploymentId\}/images/generations' has methods for the following verbs: post */ + path: "/vector_stores/{vectorStoreId}/file_batches/{batchId}/cancel", + vectorStoreId: string, + batchId: string, + ): CancelVectorStoreFileBatch; + /** Resource for '/vector_stores/\{vectorStoreId\}/file_batches/\{batchId\}/files' has methods for the following verbs: get */ ( - path: "/deployments/{deploymentId}/images/generations", - deploymentId: string, - ): 
GetImageGenerations; - /** Resource for '/deployments/\{deploymentId\}/embeddings' has methods for the following verbs: post */ - (path: "/deployments/{deploymentId}/embeddings", deploymentId: string): GetEmbeddings; + path: "/vector_stores/{vectorStoreId}/file_batches/{batchId}/files", + vectorStoreId: string, + batchId: string, + ): ListVectorStoreFileBatchFiles; } -export type OpenAIContext = Client & { +export type AssistantsContext = Client & { path: Routes; }; diff --git a/sdk/openai/openai/src/rest/index.ts b/sdk/openai/openai/src/rest/index.ts index a2d1968f9077..6799d4361cb6 100644 --- a/sdk/openai/openai/src/rest/index.ts +++ b/sdk/openai/openai/src/rest/index.ts @@ -1,14 +1,13 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -import OpenAIClient from "./openAIClient.js"; +import AssistantsClient from "./assistantsClient.js"; -export * from "./openAIClient.js"; +export * from "./assistantsClient.js"; export * from "./parameters.js"; export * from "./responses.js"; export * from "./clientDefinitions.js"; -export * from "./isUnexpected.js"; export * from "./models.js"; export * from "./outputModels.js"; -export default OpenAIClient; +export default AssistantsClient; diff --git a/sdk/openai/openai/src/rest/isUnexpected.ts b/sdk/openai/openai/src/rest/isUnexpected.ts deleted file mode 100644 index ea5afb0479ea..000000000000 --- a/sdk/openai/openai/src/rest/isUnexpected.ts +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { - GetAudioTranscriptionAsPlainText200Response, - GetAudioTranscriptionAsResponseObject200Response, - GetAudioTranscriptionAsPlainTextDefaultResponse, - GetAudioTranslationAsPlainText200Response, - GetAudioTranslationAsResponseObject200Response, - GetAudioTranslationAsPlainTextDefaultResponse, - GetCompletions200Response, - GetCompletionsDefaultResponse, - GetChatCompletions200Response, - GetChatCompletionsDefaultResponse, - GetImageGenerations200Response, - GetImageGenerationsDefaultResponse, - GetEmbeddings200Response, - GetEmbeddingsDefaultResponse, -} from "./responses.js"; - -const responseMap: Record = { - "POST /deployments/{deploymentId}/audio/transcriptions": ["200"], - "POST /deployments/{deploymentId}/audio/translations": ["200"], - "POST /deployments/{deploymentId}/completions": ["200"], - "POST /deployments/{deploymentId}/chat/completions": ["200"], - "POST /deployments/{deploymentId}/images/generations": ["200"], - "POST /deployments/{deploymentId}/embeddings": ["200"], - "GET /operations/images/{operationId}": ["200"], - "POST /images/generations:submit": ["202"], - "GET /images/generations:submit": ["200", "202"], -}; - -export function isUnexpected( - response: - | GetAudioTranscriptionAsPlainText200Response - | GetAudioTranscriptionAsResponseObject200Response - | GetAudioTranscriptionAsPlainTextDefaultResponse, -): response is GetAudioTranscriptionAsPlainTextDefaultResponse; -export function isUnexpected( - response: - | GetAudioTranslationAsPlainText200Response - | GetAudioTranslationAsResponseObject200Response - | GetAudioTranslationAsPlainTextDefaultResponse, -): response is GetAudioTranslationAsPlainTextDefaultResponse; -export function isUnexpected( - response: GetCompletions200Response | GetCompletionsDefaultResponse, -): response is GetCompletionsDefaultResponse; -export function isUnexpected( - response: GetChatCompletions200Response | GetChatCompletionsDefaultResponse, -): response is GetChatCompletionsDefaultResponse; -export function 
isUnexpected( - response: GetImageGenerations200Response | GetImageGenerationsDefaultResponse, -): response is GetImageGenerationsDefaultResponse; -export function isUnexpected( - response: GetEmbeddings200Response | GetEmbeddingsDefaultResponse, -): response is GetEmbeddingsDefaultResponse; -export function isUnexpected( - response: - | GetAudioTranscriptionAsPlainText200Response - | GetAudioTranscriptionAsResponseObject200Response - | GetAudioTranscriptionAsPlainTextDefaultResponse - | GetAudioTranslationAsPlainText200Response - | GetAudioTranslationAsResponseObject200Response - | GetAudioTranslationAsPlainTextDefaultResponse - | GetCompletions200Response - | GetCompletionsDefaultResponse - | GetChatCompletions200Response - | GetChatCompletionsDefaultResponse - | GetImageGenerations200Response - | GetImageGenerationsDefaultResponse - | GetEmbeddings200Response - | GetEmbeddingsDefaultResponse, -): response is - | GetAudioTranscriptionAsPlainTextDefaultResponse - | GetAudioTranslationAsPlainTextDefaultResponse - | GetCompletionsDefaultResponse - | GetChatCompletionsDefaultResponse - | GetImageGenerationsDefaultResponse - | GetEmbeddingsDefaultResponse { - const lroOriginal = response.headers["x-ms-original-url"]; - const url = new URL(lroOriginal ?? response.request.url); - const method = response.request.method; - let pathDetails = responseMap[`${method} ${url.pathname}`]; - if (!pathDetails) { - pathDetails = getParametrizedPathSuccess(method, url.pathname); - } - return !pathDetails.includes(response.status); -} - -function getParametrizedPathSuccess(method: string, path: string): string[] { - const pathParts = path.split("/"); - - // Traverse list to match the longest candidate - // matchedLen: the length of candidate path - // matchedValue: the matched status code array - let matchedLen = -1, - matchedValue: string[] = []; - - // Iterate the responseMap to find a match - for (const [key, value] of Object.entries(responseMap)) { - // Extracting the path from the map key which is in format - // GET /path/foo - if (!key.startsWith(method)) { - continue; - } - const candidatePath = getPathFromMapKey(key); - // Get each part of the url path - const candidateParts = candidatePath.split("/"); - - // track if we have found a match to return the values found. - let found = true; - for (let i = candidateParts.length - 1, j = pathParts.length - 1; i >= 1 && j >= 1; i--, j--) { - if (candidateParts[i]?.startsWith("{") && candidateParts[i]?.indexOf("}") !== -1) { - const start = candidateParts[i]!.indexOf("}") + 1, - end = candidateParts[i]?.length; - // If the current part of the candidate is a "template" part - // Try to use the suffix of pattern to match the path - // {guid} ==> $ - // {guid}:export ==> :export$ - const isMatched = new RegExp(`${candidateParts[i]?.slice(start, end)}`).test( - pathParts[j] || "", - ); - - if (!isMatched) { - found = false; - break; - } - continue; - } - - // If the candidate part is not a template and - // the parts don't match mark the candidate as not found - // to move on with the next candidate path. 
- if (candidateParts[i] !== pathParts[j]) { - found = false; - break; - } - } - - // We finished evaluating the current candidate parts - // Update the matched value if and only if we found the longer pattern - if (found && candidatePath.length > matchedLen) { - matchedLen = candidatePath.length; - matchedValue = value; - } - } - - return matchedValue; -} - -function getPathFromMapKey(mapKey: string): string { - const pathStart = mapKey.indexOf("/"); - return mapKey.slice(pathStart); -} diff --git a/sdk/openai/openai/src/rest/models.ts b/sdk/openai/openai/src/rest/models.ts index 2a3f5caf621d..298a83b1f2bc 100644 --- a/sdk/openai/openai/src/rest/models.ts +++ b/sdk/openai/openai/src/rest/models.ts @@ -1,1108 +1,586 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -/** The configuration information for an audio transcription request. */ -export interface AudioTranscriptionOptions { - /** - * The audio data to transcribe. This must be the binary content of a file in one of the supported media formats: - * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm. - */ - file: string; - /** The optional filename or descriptive identifier to associate with with the audio data. */ - filename?: string; - /** The requested format of the transcription response data, which will influence the content and detail of the result. */ - response_format?: AudioTranscriptionFormat; - /** - * The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code - * such as 'en' or 'fr'. - * Providing this known input language is optional but may improve the accuracy and/or latency of transcription. - */ - language?: string; - /** - * An optional hint to guide the model's style or continue from a prior audio segment. The written language of the - * prompt should match the primary spoken language of the audio data. - */ - prompt?: string; - /** - * The sampling temperature, between 0 and 1. - * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - * If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. +/** The request details to use when creating a new assistant. */ +export interface AssistantCreationOptions { + /** The ID of the model to use. */ + model: string; + /** The name of the new assistant. */ + name?: string | null; + /** The description of the new assistant. */ + description?: string | null; + /** The system instructions for the new assistant to use. */ + instructions?: string | null; + /** The collection of tools to enable for the new assistant. */ + tools?: Array; + /** + * A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` + * tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + */ + tool_resources?: CreateToolResourcesOptions | null; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + * while lower values like 0.2 will make it more focused and deterministic. + */ + temperature?: number | null; + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + * So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
+ * + * We generally recommend altering this or temperature but not both. */ - temperature?: number; - /** The model to use for this transcription request. */ - model?: string; + top_p?: number | null; + /** The response format of the tool calls used by this assistant. */ + response_format?: AssistantsApiResponseFormatOption | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record | null; } -/** The configuration information for an audio translation request. */ -export interface AudioTranslationOptions { - /** - * The audio data to translate. This must be the binary content of a file in one of the supported media formats: - * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm. - */ - file: string; - /** The optional filename or descriptive identifier to associate with with the audio data. */ - filename?: string; - /** The requested format of the translation response data, which will influence the content and detail of the result. */ - response_format?: AudioTranslationFormat; - /** - * An optional hint to guide the model's style or continue from a prior audio segment. The written language of the - * prompt should match the primary spoken language of the audio data. - */ - prompt?: string; - /** - * The sampling temperature, between 0 and 1. - * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - * If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. - */ - temperature?: number; - /** The model to use for this translation request. */ - model?: string; +/** An abstract representation of an input tool definition that an assistant can use. */ +export interface ToolDefinitionParent { + type: string; } -/** - * The configuration information for a completions request. - * Completions support a wide variety of tasks and generate text that continues from or "completes" - * provided prompt data. - */ -export interface CompletionsOptions { - /** The prompts to generate completions from. */ - prompt: string[]; - /** The maximum number of tokens to generate. */ - max_tokens?: number; - /** - * The sampling temperature to use that controls the apparent creativity of generated completions. - * Higher values will make output more random while lower values will make results more focused - * and deterministic. - * It is not recommended to modify temperature and top_p for the same completions request as the - * interaction of these two settings is difficult to predict. - */ - temperature?: number; - /** - * An alternative to sampling with temperature called nucleus sampling. This value causes the - * model to consider the results of tokens with the provided probability mass. As an example, a - * value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be - * considered. - * It is not recommended to modify temperature and top_p for the same completions request as the - * interaction of these two settings is difficult to predict. - */ - top_p?: number; - /** - * A map between GPT token IDs and bias scores that influences the probability of specific tokens - * appearing in a completions response. 
Token IDs are computed via external tokenizer tools, while - * bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to - * a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias - * score varies by model. - */ - logit_bias?: Record; - /** - * An identifier for the caller or end user of the operation. This may be used for tracking - * or rate-limiting purposes. - */ - user?: string; - /** - * The number of completions choices that should be generated per provided prompt as part of an - * overall completions response. - * Because this setting can generate many completions, it may quickly consume your token quota. - * Use carefully and ensure reasonable settings for max_tokens and stop. - */ - n?: number; - /** - * A value that controls the emission of log probabilities for the provided number of most likely - * tokens within a completions response. - */ - logprobs?: number; - /** The suffix that comes after a completion of inserted text */ - suffix?: string; - /** - * A value specifying whether completions responses should include input prompts as prefixes to - * their generated output. - */ - echo?: boolean; - /** A collection of textual sequences that will end completions generation. */ - stop?: string[]; - /** - * A value that influences the probability of generated tokens appearing based on their existing - * presence in generated text. - * Positive values will make tokens less likely to appear when they already exist and increase the - * model's likelihood to output new topics. - */ - presence_penalty?: number; - /** - * A value that influences the probability of generated tokens appearing based on their cumulative - * frequency in generated text. - * Positive values will make tokens less likely to appear as their frequency increases and - * decrease the likelihood of the model repeating the same statements verbatim. - */ - frequency_penalty?: number; - /** - * A value that controls how many completions will be internally generated prior to response - * formulation. - * When used together with n, best_of controls the number of candidate completions and must be - * greater than n. - * Because this setting can generate many completions, it may quickly consume your token quota. - * Use carefully and ensure reasonable settings for max_tokens and stop. - */ - best_of?: number; - /** A value indicating whether chat completions should be streamed for this request. */ - stream?: boolean; - /** - * The model name to provide as part of this completions request. - * Not applicable to Azure OpenAI, where deployment information should be included in the Azure - * resource URI that's connected to. - */ - model?: string; +/** The input definition information for a code interpreter tool as used to configure an assistant. */ +export interface CodeInterpreterToolDefinition extends ToolDefinitionParent { + /** The object type, which is always 'code_interpreter'. */ + type: "code_interpreter"; } -/** - * The configuration information for a chat completions request. - * Completions support a wide variety of tasks and generate text that continues from or "completes" - * provided prompt data. - */ -export interface ChatCompletionsOptions { - /** - * The collection of context messages associated with this chat completions request. 
- * Typical usage begins with a chat message for the System role that provides instructions for - * the behavior of the assistant, followed by alternating messages between the User and - * Assistant roles. - */ - messages: Array; - /** A list of functions the model may generate JSON inputs for. */ - functions?: Array; - /** - * Controls how the model responds to function calls. "none" means the model does not call a function, - * and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. - * Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. - * "none" is the default when no functions are present. "auto" is the default if functions are present. - */ - function_call?: FunctionCallPreset | FunctionName; - /** The maximum number of tokens to generate. */ - max_tokens?: number; - /** - * The sampling temperature to use that controls the apparent creativity of generated completions. - * Higher values will make output more random while lower values will make results more focused - * and deterministic. - * It is not recommended to modify temperature and top_p for the same completions request as the - * interaction of these two settings is difficult to predict. - */ - temperature?: number; - /** - * An alternative to sampling with temperature called nucleus sampling. This value causes the - * model to consider the results of tokens with the provided probability mass. As an example, a - * value of 0.15 will cause only the tokens comprising the top 15% of probability mass to be - * considered. - * It is not recommended to modify temperature and top_p for the same completions request as the - * interaction of these two settings is difficult to predict. - */ - top_p?: number; - /** - * A map between GPT token IDs and bias scores that influences the probability of specific tokens - * appearing in a completions response. Token IDs are computed via external tokenizer tools, while - * bias scores reside in the range of -100 to 100 with minimum and maximum values corresponding to - * a full ban or exclusive selection of a token, respectively. The exact behavior of a given bias - * score varies by model. - */ - logit_bias?: Record; - /** - * An identifier for the caller or end user of the operation. This may be used for tracking - * or rate-limiting purposes. - */ - user?: string; - /** - * The number of chat completions choices that should be generated for a chat completions - * response. - * Because this setting can generate many completions, it may quickly consume your token quota. - * Use carefully and ensure reasonable settings for max_tokens and stop. - */ - n?: number; - /** A collection of textual sequences that will end completions generation. */ - stop?: string[]; - /** - * A value that influences the probability of generated tokens appearing based on their existing - * presence in generated text. - * Positive values will make tokens less likely to appear when they already exist and increase the - * model's likelihood to output new topics. - */ - presence_penalty?: number; - /** - * A value that influences the probability of generated tokens appearing based on their cumulative - * frequency in generated text. - * Positive values will make tokens less likely to appear as their frequency increases and - * decrease the likelihood of the model repeating the same statements verbatim. - */ - frequency_penalty?: number; - /** A value indicating whether chat completions should be streamed for this request. 
*/ - stream?: boolean; - /** - * The model name to provide as part of this completions request. - * Not applicable to Azure OpenAI, where deployment information should be included in the Azure - * resource URI that's connected to. - */ - model?: string; - /** - * The configuration entries for Azure OpenAI chat extensions that use them. - * This additional specification is only compatible with Azure OpenAI. - */ - data_sources?: Array; - /** If provided, the configuration options for available Azure OpenAI chat enhancements. */ - enhancements?: AzureChatEnhancementConfiguration; - /** - * If specified, the system will make a best effort to sample deterministically such that repeated requests with the - * same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the - * system_fingerprint response parameter to monitor changes in the backend." - */ - seed?: number; - /** Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. */ - logprobs?: boolean | null; - /** An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. */ - top_logprobs?: number | null; - /** An object specifying the format that the model must output. Used to enable JSON mode. */ - response_format?: ChatCompletionsResponseFormat; - /** The available tool definitions that the chat completions request can use, including caller-defined functions. */ - tools?: Array; - /** If specified, the model will configure which of the provided tools it can use for the chat completions response. */ - tool_choice?: ChatCompletionsToolSelectionPreset | ChatCompletionsNamedToolSelection; +/** The input definition information for a file search tool as used to configure an assistant. */ +export interface FileSearchToolDefinition extends ToolDefinitionParent { + /** The object type, which is always 'file_search'. */ + type: "file_search"; } -/** An abstract representation of a chat message as provided in a request. */ -export interface ChatRequestMessageParent { - role: ChatRole; +/** The input definition information for a function tool as used to configure an assistant. */ +export interface FunctionToolDefinition extends ToolDefinitionParent { + /** The object type, which is always 'function'. */ + type: "function"; + /** The definition of the concrete function that the function tool should call. */ + function: FunctionDefinition; } -/** - * A request chat message containing system instructions that influence how the model will generate a chat completions - * response. - */ -export interface ChatRequestSystemMessage extends ChatRequestMessageParent { - /** The chat role associated with this message, which is always 'system' for system messages. */ - role: "system"; - /** The contents of the system message. */ - content: string; - /** An optional name for the participant. */ - name?: string; +/** The input definition information for a function. */ +export interface FunctionDefinition { + /** The name of the function to be called. */ + name: string; + /** A description of what the function does, used by the model to choose when and how to call the function. */ + description?: string; + /** The parameters the function accepts, described as a JSON Schema object. */ + parameters: unknown; }
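
To make these tool shapes concrete, here is a hedged example of a `function` tool entry of the kind `AssistantCreationOptions.tools` accepts, with `parameters` carrying a JSON Schema object as described above. The weather-lookup function and its schema are invented for illustration.

```ts
// Shaped like FunctionToolDefinition above, with the nested object
// following FunctionDefinition; all names here are illustrative.
const getWeatherTool = {
  type: "function",
  function: {
    name: "get_weather",
    description: "Look up the current temperature for a given city.",
    parameters: {
      type: "object",
      properties: {
        city: { type: "string", description: "City name, e.g. 'Seattle'" },
        unit: { type: "string", enum: ["celsius", "fahrenheit"] },
      },
      required: ["city"],
    },
  },
} as const;

// Passed alongside the other creation options (model name is a placeholder):
const assistantOptions = {
  model: "<model-or-deployment-name>",
  name: "weather-assistant",
  tools: [getWeatherTool],
};
```

When the model invokes such a tool during a run, the run reports a 'requires_action' status and the results flow back through the submit-tool-outputs operation shown earlier in this diff.
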
-/** A request chat message representing user input to the assistant. */ -export interface ChatRequestUserMessage extends ChatRequestMessageParent { - /** The chat role associated with this message, which is always 'user' for user messages. */ - role: "user"; - /** The contents of the user message, with available input types varying by selected model. */ - content: string | Array; - /** An optional name for the participant. */ - name?: string; +/** + * Request object. A set of resources that are used by the assistant's tools. The resources are specific to the + * type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` + * tool requires a list of vector store IDs. + */ +export interface CreateToolResourcesOptions { + /** + * A list of file IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + code_interpreter?: CreateCodeInterpreterToolResourceOptions; + /** A list of vector stores or their IDs made available to the `file_search` tool. */ + file_search?: CreateFileSearchToolResourceOptions; } -/** An abstract representation of a structured content item within a chat message. */ -export interface ChatMessageContentItemParent { - type: string; +/** A set of resources that will be used by the `code_interpreter` tool. Request object. */ +export interface CreateCodeInterpreterToolResourceOptions { + /** A list of file IDs made available to the `code_interpreter` tool. */ + file_ids?: string[]; } -/** A structured chat content item containing plain text. */ -export interface ChatMessageTextContentItem extends ChatMessageContentItemParent { - /** The discriminated object type: always 'text' for this type. */ - type: "text"; - /** The content of the message. */ - text: string; +/** File IDs associated with the vector store to be passed to the helper. */ +export interface CreateFileSearchToolResourceVectorStoreOptions { + /** A list of file IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. */ + file_ids: string[]; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record | null; } -/** A structured chat content item containing an image reference. */ -export interface ChatMessageImageContentItem extends ChatMessageContentItemParent { - /** The discriminated object type: always 'image_url' for this type. */ - type: "image_url"; - /** An internet location, which must be accessible to the model,from which the image may be retrieved. */ - image_url: ChatMessageImageUrl; +/** + * An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run. + * If `text` the model can return text or any value needed. + */ +export interface AssistantsApiResponseFormat { + /** Must be one of `text` or `json_object`. */ + type?: ApiResponseFormat; } -/** An internet location from which the model may retrieve an image. */ -export interface ChatMessageImageUrl { - /** The URL of the image. */ - url: string; +/** The request details to use when modifying an existing assistant. */ +export interface UpdateAssistantOptions { + /** The ID of the model to use. */ + model?: string; + /** The modified name for the assistant to use. 
*/ + name?: string | null; + /** The modified description for the assistant to use. */ + description?: string | null; + /** The modified system instructions for the new assistant to use. */ + instructions?: string | null; + /** The modified collection of tools to enable for the assistant. */ + tools?: Array; /** - * The evaluation quality setting to use, which controls relative prioritization of speed, token consumption, and - * accuracy. + * A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, + * the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. */ - detail?: ChatMessageImageDetailLevel; -} - -/** A request chat message representing response or action from the assistant. */ -export interface ChatRequestAssistantMessage extends ChatRequestMessageParent { - /** The chat role associated with this message, which is always 'assistant' for assistant messages. */ - role: "assistant"; - /** The content of the message. */ - content: string | null; - /** An optional name for the participant. */ - name?: string; + tool_resources?: UpdateToolResourcesOptions; /** - * The tool calls that must be resolved and have their outputs appended to subsequent input messages for the chat - * completions request to resolve as configured. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, + * while lower values like 0.2 will make it more focused and deterministic. */ - tool_calls?: Array; + temperature?: number | null; /** - * The function call that must be resolved and have its output appended to subsequent input messages for the chat - * completions request to resolve as configured. + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + * So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. */ - function_call?: FunctionCall; -} - -/** - * An abstract representation of a tool call that must be resolved in a subsequent request to perform the requested - * chat completion. - */ -export interface ChatCompletionsToolCallParent { - /** The ID of the tool call. */ - id: string; - type: string; + top_p?: number | null; + /** The response format of the tool calls used by this assistant. */ + response_format?: AssistantsApiResponseFormatOption | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record | null; } /** - * A tool call to a function tool, issued by the model in evaluation of a configured function tool, that represents - * a function invocation needed for a subsequent chat completions request to resolve. + * Request object. A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. + * For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of + * vector store IDs. */ -export interface ChatCompletionsFunctionToolCall extends ChatCompletionsToolCallParent { - /** The type of tool call, in this case always 'function'. 
*/ - type: "function"; - /** The details of the function invocation requested by the tool call. */ - function: FunctionCall; -} - -/** The name and arguments of a function that should be called, as generated by the model. */ -export interface FunctionCall { - /** The name of the function to call. */ - name: string; +export interface UpdateToolResourcesOptions { /** - * The arguments to call the function with, as generated by the model in JSON format. - * Note that the model does not always generate valid JSON, and may hallucinate parameters - * not defined by your function schema. Validate the arguments in your code before calling - * your function. + * Overrides the list of file IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. */ - arguments: string; + code_interpreter?: UpdateCodeInterpreterToolResourceOptions; + /** Overrides the vector store attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. */ + file_search?: UpdateFileSearchToolResourceOptions; } -/** A request chat message representing requested output from a configured tool. */ -export interface ChatRequestToolMessage extends ChatRequestMessageParent { - /** The chat role associated with this message, which is always 'tool' for tool messages. */ - role: "tool"; - /** The content of the message. */ - content: string | null; - /** The ID of the tool call resolved by the provided content. */ - tool_call_id: string; +/** Request object to update `code_interpreted` tool resources. */ +export interface UpdateCodeInterpreterToolResourceOptions { + /** A list of file IDs to override the current list of the assistant. */ + fileIds?: string[]; } -/** A request chat message representing requested output from a configured function. */ -export interface ChatRequestFunctionMessage extends ChatRequestMessageParent { - /** The chat role associated with this message, which is always 'function' for function messages. */ - role: "function"; - /** The name of the function that was called to produce output. */ - name: string; - /** The output of the function as requested by the function call. */ - content: string | null; +/** Request object to update `file_search` tool resources. */ +export interface UpdateFileSearchToolResourceOptions { + /** A list of vector store IDs to override the current list of the assistant. */ + vector_store_ids?: string[]; } -/** The definition of a caller-specified function that chat completions may invoke in response to matching user input. */ -export interface FunctionDefinition { - /** The name of the function to be called. */ - name: string; +/** The details used to create a new assistant thread. */ +export interface AssistantThreadCreationOptions { + /** The initial messages to associate with the new thread. */ + messages?: Array; /** - * A description of what the function does. The model will use this description when selecting the function and - * interpreting its parameters. + * A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the + * type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires + * a list of vector store IDs. */ - description?: string; - /** The parameters the function accepts, described as a JSON Schema object. 
*/ - parameters?: unknown; + tool_resources?: CreateToolResourcesOptions | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record | null; } -/** - * A structure that specifies the exact name of a specific, request-provided function to use when processing a chat - * completions operation. - */ -export interface FunctionName { - /** The name of the function to call. */ - name: string; -} - -/** - * A representation of configuration data for a single Azure OpenAI chat extension. This will be used by a chat - * completions request that should use Azure OpenAI chat extensions to augment the response behavior. - * The use of this configuration is compatible only with Azure OpenAI. - */ -export interface AzureChatExtensionConfigurationParent { - type: AzureChatExtensionType; -} - -/** - * A specific representation of configurable options for Azure Search when using it as an Azure OpenAI chat - * extension. - */ -export interface AzureSearchChatExtensionConfiguration - extends AzureChatExtensionConfigurationParent { +/** A single message within an assistant thread, as provided during that thread's creation for its initial state. */ +export interface ThreadMessageOptions { /** - * The type label to use when configuring Azure OpenAI chat extensions. This should typically not be changed from its - * default value for Azure Cognitive Search. + * The role of the entity that is creating the message. Allowed values include: + * - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. + * - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into + * the conversation. */ - type: "azure_search"; - /** The parameters to use when configuring Azure Search. */ - parameters: AzureSearchChatExtensionParameters; -} - -/** Parameters for Azure Cognitive Search when used as an Azure OpenAI chat extension. The supported authentication types are APIKey, SystemAssignedManagedIdentity and UserAssignedManagedIdentity. */ -export interface AzureSearchChatExtensionParameters { + role: MessageRole; /** - * The authentication method to use when accessing the defined data source. - * Each data source type supports a specific set of available authentication methods; please see the documentation of - * the data source for supported mechanisms. - * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) - * authentication. + * The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via + * a separate call to the create message API. */ - authentication?: OnYourDataAuthenticationOptions; - /** The configured top number of documents to feature for the configured query. */ - top_n_documents?: number; - /** Whether queries should be restricted to use of indexed data. */ - in_scope?: boolean; - /** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */ - strictness?: number; - /** Give the model instructions about how it should behave and any context it should reference when generating a response. 
You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */ - role_information?: string; - /** The absolute endpoint path for the Azure Cognitive Search resource to use. */ - endpoint: string; - /** The name of the index to use as available in the referenced Azure Cognitive Search resource. */ - index_name: string; - /** Customized field mapping behavior to use when interacting with the search index. */ - fields_mapping?: AzureSearchIndexFieldMappingOptions; - /** The query type to use with Azure Cognitive Search. */ - query_type?: AzureSearchQueryType; - /** The additional semantic configuration for the query. */ - semantic_configuration?: string; - /** Search filter. */ - filter?: string; - /** The embedding dependency for vector search. */ - embedding_dependency?: OnYourDataVectorizationSource; -} - -/** The authentication options for Azure OpenAI On Your Data. */ -export interface OnYourDataAuthenticationOptionsParent { - type: OnYourDataAuthenticationType; + content: string; + /** A list of files attached to the message, and the tools they should be added to. */ + attachments?: Array | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record | null; } -/** The authentication options for Azure OpenAI On Your Data when using an API key. */ -export interface OnYourDataApiKeyAuthenticationOptions - extends OnYourDataAuthenticationOptionsParent { - /** The authentication type of API key. */ - type: "api_key"; - /** The API key to use for authentication. */ - key: string; +/** This describes to which tools a file has been attached. */ +export interface MessageAttachment { + /** The ID of the file to attach to the message. */ + file_id: string; + /** The tools to add to this file. */ + tools: MessageAttachmentToolDefinition[]; } -/** The authentication options for Azure OpenAI On Your Data when using a connection string. */ -export interface OnYourDataConnectionStringAuthenticationOptions - extends OnYourDataAuthenticationOptionsParent { - /** The authentication type of connection string. */ - type: "connection_string"; - /** The connection string to use for authentication. */ - connection_string: string; +/** The details used to update an existing assistant thread */ +export interface UpdateAssistantThreadOptions { + /** + * A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the + * type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires + * a list of vector store IDs + */ + tool_resources?: UpdateToolResourcesOptions | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record | null; } -/** The authentication options for Azure OpenAI On Your Data when using an Elasticsearch key and key ID pair. */ -export interface OnYourDataKeyAndKeyIdAuthenticationOptions - extends OnYourDataAuthenticationOptionsParent { - /** The authentication type of Elasticsearch key and key ID pair. 
*/ - type: "key_and_key_id"; - /** The key to use for authentication. */ - key: string; - /** The key ID to use for authentication. */ - key_id: string; +/** A single, existing message within an assistant thread. */ +export interface ThreadMessage { + /** The identifier, which can be referenced in API endpoints. */ + id: string; + /** The object type, which is always 'thread.message'. */ + object: "thread.message"; + /** The Unix timestamp, in seconds, representing when this object was created. */ + created_at: number; + /** The ID of the thread that this message belongs to. */ + thread_id: string; + /** The status of the message. */ + status: MessageStatus; + /** On an incomplete message, details about why the message is incomplete. */ + incomplete_details: MessageIncompleteDetails | null; + /** The Unix timestamp (in seconds) for when the message was completed. */ + completed_at: number | null; + /** The Unix timestamp (in seconds) for when the message was marked as incomplete. */ + incomplete_at: number | null; + /** The role associated with the assistant thread message. */ + role: MessageRole; + /** The list of content items associated with the assistant thread message. */ + content: Array; + /** If applicable, the ID of the assistant that authored this message. */ + assistant_id: string | null; + /** If applicable, the ID of the run associated with the authoring of this message. */ + run_id: string | null; + /** A list of files attached to the message, and the tools they were added to. */ + attachments: Array | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata: Record | null; +} + +/** Information providing additional detail about a message entering an incomplete status. */ +export interface MessageIncompleteDetails { + /** The provided reason describing why the message was marked as incomplete. */ + reason: MessageIncompleteDetailsReason; +} + +/** An abstract representation of a single item of thread message content. */ +export interface MessageContentParent { + type: string; } -/** The authentication options for Azure OpenAI On Your Data when using an Elasticsearch encoded API key. */ -export interface OnYourDataEncodedApiKeyAuthenticationOptions - extends OnYourDataAuthenticationOptionsParent { - /** The authentication type of Elasticsearch encoded API Key. */ - type: "encoded_api_key"; - /** The encoded API key to use for authentication. */ - encoded_api_key: string; +/** A representation of a textual item of thread message content. */ +export interface MessageTextContent extends MessageContentParent { + /** The object type, which is always 'text'. */ + type: "text"; + /** The text and associated annotations for this thread message content item. */ + text: MessageTextDetails; } -/** The authentication options for Azure OpenAI On Your Data when using access token. */ -export interface OnYourDataAccessTokenAuthenticationOptions - extends OnYourDataAuthenticationOptionsParent { - /** The authentication type of access token. */ - type: "access_token"; - /** The access token to use for authentication. */ - access_token: string; +/** The text and associated annotations for a single item of assistant thread message content. */ +export interface MessageTextDetails { + /** The text data. */ + value: string; + /** A list of annotations associated with this text. 
*/ + annotations: Array; } -/** The authentication options for Azure OpenAI On Your Data when using a system-assigned managed identity. */ -export interface OnYourDataSystemAssignedManagedIdentityAuthenticationOptions - extends OnYourDataAuthenticationOptionsParent { - /** The authentication type of system-assigned managed identity. */ - type: "system_assigned_managed_identity"; +/** An abstract representation of an annotation to text thread message content. */ +export interface MessageTextAnnotationParent { + /** The textual content associated with this text annotation item. */ + text: string; + type: string; } -/** The authentication options for Azure OpenAI On Your Data when using a user-assigned managed identity. */ -export interface OnYourDataUserAssignedManagedIdentityAuthenticationOptions - extends OnYourDataAuthenticationOptionsParent { - /** The authentication type of user-assigned managed identity. */ - type: "user_assigned_managed_identity"; - /** The resource ID of the user-assigned managed identity to use for authentication. */ - managed_identity_resource_id: string; +/** A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the 'file_search' tool to search files. */ +export interface MessageTextFileCitationAnnotation + extends MessageTextAnnotationParent { + /** The object type, which is always 'file_citation'. */ + type: "file_citation"; + /** + * A citation within the message that points to a specific quote from a specific file. + * Generated when the assistant uses the "file_search" tool to search files. + */ + file_citation: MessageTextFileCitationDetails; + /** The first text index associated with this text annotation. */ + start_index?: number; + /** The last text index associated with this text annotation. */ + end_index?: number; } -/** Optional settings to control how fields are processed when using a configured Azure Search resource. */ -export interface AzureSearchIndexFieldMappingOptions { - /** The name of the index field to use as a title. */ - title_field?: string; - /** The name of the index field to use as a URL. */ - url_field?: string; - /** The name of the index field to use as a filepath. */ - filepath_field?: string; - /** The names of index fields that should be treated as content. */ - content_fields?: string[]; - /** The separator pattern that content fields should use. */ - content_fields_separator?: string; - /** The names of fields that represent vector data. */ - vector_fields?: string[]; - /** The names of fields that represent image vector data. */ - image_vector_fields?: string[]; +/** A representation of a file-based text citation, as used in a file-based annotation of text thread message content. */ +export interface MessageTextFileCitationDetails { + /** The ID of the file associated with this citation. */ + file_id: string; + /** The specific quote cited in the associated file. */ + quote: string; } -/** An abstract representation of a vectorization source for Azure OpenAI On Your Data with vector search. */ -export interface OnYourDataVectorizationSourceParent { - type: OnYourDataVectorizationSourceType; +/** A citation within the message that points to a file located at a specific path. */ +export interface MessageTextFilePathAnnotation + extends MessageTextAnnotationParent { + /** The object type, which is always 'file_path'. 
*/ + type: "file_path"; + /** A URL for the file that's generated when the assistant used the code_interpreter tool to generate a file. */ + file_path: MessageTextFilePathDetails; + /** The first text index associated with this text annotation. */ + start_index?: number; + /** The last text index associated with this text annotation. */ + end_index?: number; } -/** - * The details of a a vectorization source, used by Azure OpenAI On Your Data when applying vector search, that is based - * on a public Azure OpenAI endpoint call for embeddings. - */ -export interface OnYourDataEndpointVectorizationSource extends OnYourDataVectorizationSourceParent { - /** The type of vectorization source to use. Always 'Endpoint' for this type. */ - type: "endpoint"; - /** Specifies the resource endpoint URL from which embeddings should be retrieved. It should be in the format of https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/embeddings. The api-version query parameter is not allowed. */ - endpoint: string; - /** Specifies the authentication options to use when retrieving embeddings from the specified endpoint. */ - authentication: OnYourDataAuthenticationOptions; +/** An encapsulation of an image file ID, as used by message image content. */ +export interface MessageTextFilePathDetails { + /** The ID of the specific file that the citation is from. */ + file_id: string; } -/** - * The details of a a vectorization source, used by Azure OpenAI On Your Data when applying vector search, that is based - * on an internal embeddings model deployment name in the same Azure OpenAI resource. - */ -export interface OnYourDataDeploymentNameVectorizationSource - extends OnYourDataVectorizationSourceParent { - /** The type of vectorization source to use. Always 'DeploymentName' for this type. */ - type: "deployment_name"; - /** The embedding model deployment name within the same Azure OpenAI resource. This enables you to use vector search without Azure OpenAI api-key and without Azure OpenAI public network access. */ - deployment_name: string; +/** A representation of image file content in a thread message. */ +export interface MessageImageFileContent extends MessageContentParent { + /** The object type, which is always 'image_file'. */ + type: "image_file"; + /** The image file for this thread message content item. */ + image_file: MessageImageFileDetails; } -/** - * The details of a a vectorization source, used by Azure OpenAI On Your Data when applying vector search, that is based - * on a search service model ID. Currently only supported by Elasticsearch®. - */ -export interface OnYourDataModelIdVectorizationSource extends OnYourDataVectorizationSourceParent { - /** The type of vectorization source to use. Always 'ModelId' for this type. */ - type: "model_id"; - /** The embedding model ID build inside the search service. Currently only supported by Elasticsearch®. */ - model_id: string; +/** An image reference, as represented in thread message content. */ +export interface MessageImageFileDetails { + /** The ID for the file associated with this image. */ + file_id: string; } -/** - * A specific representation of configurable options for Azure Machine Learning vector index when using it as an Azure - * OpenAI chat extension. - */ -export interface AzureMachineLearningIndexChatExtensionConfiguration - extends AzureChatExtensionConfigurationParent { +/** The details used when creating a new run of an assistant thread. 
*/ +export interface CreateRunOptions { + /** The ID of the assistant that should run the thread. */ + assistant_id: string; + /** The overridden model name that the assistant should use to run the thread. */ + model?: string | null; + /** The overridden system instructions that the assistant should use to run the thread. */ + instructions?: string | null; /** - * The type label to use when configuring Azure OpenAI chat extensions. This should typically not be changed from its - * default value for Azure Machine Learning vector index. + * Additional instructions to append at the end of the instructions for the run. This is useful for modifying the behavior + * on a per-run basis without overriding other instructions. */ - type: "azure_ml_index"; - /** The parameters for the Azure Machine Learning vector index chat extension. */ - parameters: AzureMachineLearningIndexChatExtensionParameters; -} - -/** Parameters for the Azure Machine Learning vector index chat extension. The supported authentication types are AccessToken, SystemAssignedManagedIdentity and UserAssignedManagedIdentity. */ -export interface AzureMachineLearningIndexChatExtensionParameters { + additional_instructions?: string | null; + /** Adds additional messages to the thread before creating the run. */ + additional_messages?: Array | null; + /** The overridden list of enabled tools that the assistant should use to run the thread. */ + tools?: Array | null; /** - * The authentication method to use when accessing the defined data source. - * Each data source type supports a specific set of available authentication methods; please see the documentation of - * the data source for supported mechanisms. - * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) - * authentication. + * If `true`, returns a stream of events that happen during the Run as server-sent events, + * terminating when the Run enters a terminal state with a `data: [DONE]` message. */ - authentication?: OnYourDataAuthenticationOptions; - /** The configured top number of documents to feature for the configured query. */ - top_n_documents?: number; - /** Whether queries should be restricted to use of indexed data. */ - in_scope?: boolean; - /** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */ - strictness?: number; - /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */ - role_information?: string; - /** The resource ID of the Azure Machine Learning project. */ - project_resource_id: string; - /** The Azure Machine Learning vector index name. */ - name: string; - /** The version of the Azure Machine Learning vector index. */ - version: string; - /** Search filter. Only supported if the Azure Machine Learning vector index is of type AzureSearch. */ - filter?: string; -} - -/** - * A specific representation of configurable options for Azure Cosmos DB when using it as an Azure OpenAI chat - * extension. - */ -export interface AzureCosmosDBChatExtensionConfiguration - extends AzureChatExtensionConfigurationParent { + stream?: boolean; /** - * The type label to use when configuring Azure OpenAI chat extensions. 
This should typically not be changed from its - * default value for Azure Cosmos DB. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. */ - type: "azure_cosmos_db"; - /** The parameters to use when configuring Azure OpenAI CosmosDB chat extensions. */ - parameters: AzureCosmosDBChatExtensionParameters; -} - -/** - * Parameters to use when configuring Azure OpenAI On Your Data chat extensions when using Azure Cosmos DB for - * MongoDB vCore. The supported authentication type is ConnectionString. - */ -export interface AzureCosmosDBChatExtensionParameters { + temperature?: number | null; /** - * The authentication method to use when accessing the defined data source. - * Each data source type supports a specific set of available authentication methods; please see the documentation of - * the data source for supported mechanisms. - * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) - * authentication. + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + * comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. */ - authentication?: OnYourDataAuthenticationOptions; - /** The configured top number of documents to feature for the configured query. */ - top_n_documents?: number; - /** Whether queries should be restricted to use of indexed data. */ - in_scope?: boolean; - /** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */ - strictness?: number; - /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */ - role_information?: string; - /** The MongoDB vCore database name to use with Azure Cosmos DB. */ - database_name: string; - /** The name of the Azure Cosmos DB resource container. */ - container_name: string; - /** The MongoDB vCore index name to use with Azure Cosmos DB. */ - index_name: string; - /** Customized field mapping behavior to use when interacting with the search index. */ - fields_mapping: AzureCosmosDBFieldMappingOptions; - /** The embedding dependency for vector search. */ - embedding_dependency: OnYourDataVectorizationSource; -} - -/** Optional settings to control how fields are processed when using a configured Azure Cosmos DB resource. */ -export interface AzureCosmosDBFieldMappingOptions { - /** The name of the index field to use as a title. */ - title_field?: string; - /** The name of the index field to use as a URL. */ - url_field?: string; - /** The name of the index field to use as a filepath. */ - filepath_field?: string; - /** The names of index fields that should be treated as content. */ - content_fields: string[]; - /** The separator pattern that content fields should use. */ - content_fields_separator?: string; - /** The names of fields that represent vector data. */ - vector_fields: string[]; -} - -/** - * A specific representation of configurable options for Elasticsearch when using it as an Azure OpenAI chat - * extension. 
- */ -export interface ElasticsearchChatExtensionConfiguration - extends AzureChatExtensionConfigurationParent { + top_p?: number | null; /** - * The type label to use when configuring Azure OpenAI chat extensions. This should typically not be changed from its - * default value for Elasticsearch®. + * The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only + * the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. */ - type: "elasticsearch"; - /** The parameters to use when configuring Elasticsearch®. */ - parameters: ElasticsearchChatExtensionParameters; -} - -/** Parameters to use when configuring Elasticsearch® as an Azure OpenAI chat extension. The supported authentication types are KeyAndKeyId and EncodedAPIKey. */ -export interface ElasticsearchChatExtensionParameters { + max_prompt_tokens?: number | null; /** - * The authentication method to use when accessing the defined data source. - * Each data source type supports a specific set of available authentication methods; please see the documentation of - * the data source for supported mechanisms. - * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) - * authentication. + * The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort + * to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. */ - authentication?: OnYourDataAuthenticationOptions; - /** The configured top number of documents to feature for the configured query. */ - top_n_documents?: number; - /** Whether queries should be restricted to use of indexed data. */ - in_scope?: boolean; - /** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */ - strictness?: number; - /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */ - role_information?: string; - /** The endpoint of Elasticsearch®. */ - endpoint: string; - /** The index name of Elasticsearch®. */ - index_name: string; - /** The index field mapping options of Elasticsearch®. */ - fields_mapping?: ElasticsearchIndexFieldMappingOptions; - /** The query type of Elasticsearch®. */ - query_type?: ElasticsearchQueryType; - /** The embedding dependency for vector search. */ - embedding_dependency?: OnYourDataVectorizationSource; -} - -/** Optional settings to control how fields are processed when using a configured Elasticsearch® resource. */ -export interface ElasticsearchIndexFieldMappingOptions { - /** The name of the index field to use as a title. */ - title_field?: string; - /** The name of the index field to use as a URL. */ - url_field?: string; - /** The name of the index field to use as a filepath. */ - filepath_field?: string; - /** The names of index fields that should be treated as content. 
*/ - content_fields?: string[]; - /** The separator pattern that content fields should use. */ - content_fields_separator?: string; - /** The names of fields that represent vector data. */ - vector_fields?: string[]; + max_completion_tokens?: number | null; + /** The strategy to use for dropping messages as the context windows moves forward. */ + truncation_strategy?: TruncationObject | null; + /** Controls whether or not and which tool is called by the model. */ + tool_choice?: AssistantsApiToolChoiceOption | null; + /** Specifies the format that the model must output. */ + response_format?: AssistantsApiResponseFormatOption | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record | null; } /** - * A specific representation of configurable options for Pinecone when using it as an Azure OpenAI chat - * extension. + * Controls for how a thread will be truncated prior to the run. Use this to control the initial + * context window of the run. */ -export interface PineconeChatExtensionConfiguration extends AzureChatExtensionConfigurationParent { +export interface TruncationObject { /** - * The type label to use when configuring Azure OpenAI chat extensions. This should typically not be changed from its - * default value for Pinecone. + * The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will + * be truncated to the `lastMessages` count most recent messages in the thread. When set to `auto`, messages in the middle of the thread + * will be dropped to fit the context length of the model, `max_prompt_tokens`. */ - type: "pinecone"; - /** The parameters to use when configuring Azure OpenAI chat extensions. */ - parameters: PineconeChatExtensionParameters; -} - -/** Parameters for configuring Azure OpenAI Pinecone chat extensions. The supported authentication type is APIKey. */ -export interface PineconeChatExtensionParameters { - /** - * The authentication method to use when accessing the defined data source. - * Each data source type supports a specific set of available authentication methods; please see the documentation of - * the data source for supported mechanisms. - * If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) - * authentication. - */ - authentication?: OnYourDataAuthenticationOptions; - /** The configured top number of documents to feature for the configured query. */ - top_n_documents?: number; - /** Whether queries should be restricted to use of indexed data. */ - in_scope?: boolean; - /** The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. */ - strictness?: number; - /** Give the model instructions about how it should behave and any context it should reference when generating a response. You can describe the assistant's personality and tell it how to format responses. There's a 100 token limit for it, and it counts against the overall token limit. */ - role_information?: string; - /** The environment name of Pinecone. */ - environment: string; - /** The name of the Pinecone database index. */ - index_name: string; - /** Customized field mapping behavior to use when interacting with the search index. 
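// A sketch of CreateRunOptions combining the run controls documented above;
// the assistant ID and numeric limits are placeholder values.
const runOptions: CreateRunOptions = {
  assistant_id: "asst_placeholder",
  additional_instructions: "Answer in one short paragraph.",
  max_prompt_tokens: 4000,
  max_completion_tokens: 800,
  // Build the run's context from only the 10 most recent thread messages.
  truncation_strategy: { type: "last_messages", last_messages: 10 },
  tool_choice: "auto",
};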
*/ - fields_mapping: PineconeFieldMappingOptions; - /** The embedding dependency for vector search. */ - embedding_dependency: OnYourDataVectorizationSource; -} - -/** Optional settings to control how fields are processed when using a configured Pinecone resource. */ -export interface PineconeFieldMappingOptions { - /** The name of the index field to use as a title. */ - title_field?: string; - /** The name of the index field to use as a URL. */ - url_field?: string; - /** The name of the index field to use as a filepath. */ - filepath_field?: string; - /** The names of index fields that should be treated as content. */ - content_fields: string[]; - /** The separator pattern that content fields should use. */ - content_fields_separator?: string; + type: TruncationStrategy; + /** The number of most recent messages from the thread when constructing the context for the run. */ + last_messages?: number | null; } -/** A representation of the available Azure OpenAI enhancement configurations. */ -export interface AzureChatEnhancementConfiguration { - /** A representation of the available options for the Azure OpenAI grounding enhancement. */ - grounding?: AzureChatGroundingEnhancementConfiguration; - /** A representation of the available options for the Azure OpenAI optical character recognition (OCR) enhancement. */ - ocr?: AzureChatOCREnhancementConfiguration; +/** Specifies a tool the model should use. Use to force the model to call a specific tool. */ +export interface AssistantsNamedToolChoice { + /** the type of tool. If type is `function`, the function name must be set. */ + type: AssistantsNamedToolChoiceType; + /** The name of the function to call */ + function?: FunctionName; } -/** A representation of the available options for the Azure OpenAI grounding enhancement. */ -export interface AzureChatGroundingEnhancementConfiguration { - /** Specifies whether the enhancement is enabled. */ - enabled: boolean; -} - -/** A representation of the available options for the Azure OpenAI optical character recognition (OCR) enhancement. */ -export interface AzureChatOCREnhancementConfiguration { - /** Specifies whether the enhancement is enabled. */ - enabled: boolean; -} - -/** - * An abstract representation of a response format configuration usable by Chat Completions. Can be used to enable JSON - * mode. - */ -export interface ChatCompletionsResponseFormatParent { - type: string; -} - -/** - * The standard Chat Completions response format that can freely generate text and is not guaranteed to produce response - * content that adheres to a specific schema. - */ -export interface ChatCompletionsTextResponseFormat extends ChatCompletionsResponseFormatParent { - /** The discriminated object type, which is always 'text' for this format. */ - type: "text"; -} - -/** A response format for Chat Completions that restricts responses to emitting valid JSON objects. */ -export interface ChatCompletionsJsonResponseFormat extends ChatCompletionsResponseFormatParent { - /** The discriminated object type, which is always 'json_object' for this format. */ - type: "json_object"; -} - -/** An abstract representation of a tool that can be used by the model to improve a chat completions response. */ -export interface ChatCompletionsToolDefinitionParent { - type: string; -} - -/** The definition information for a chat completions function tool that can call a function in response to a tool call. 
*/ -export interface ChatCompletionsFunctionToolDefinition extends ChatCompletionsToolDefinitionParent { - /** The object name, which is always 'function'. */ - type: "function"; - /** The function definition details for the function tool. */ - function: FunctionDefinition; -} - -/** An abstract representation of an explicit, named tool selection to use for a chat completions request. */ -export interface ChatCompletionsNamedToolSelectionParent { - type: string; -} - -/** A tool selection of a specific, named function tool that will limit chat completions to using the named function. */ -export interface ChatCompletionsNamedFunctionToolSelection - extends ChatCompletionsNamedToolSelectionParent { - /** The object type, which is always 'function'. */ - type: "function"; - /** The function that should be called. */ - function: ChatCompletionsFunctionToolSelection; -} - -/** A tool selection of a specific, named function tool that will limit chat completions to using the named function. */ -export interface ChatCompletionsFunctionToolSelection { - /** The name of the function that should be called. */ +/** The function name that will be used, if using the `function` tool */ +export interface FunctionName { + /** The name of the function to call */ name: string; } -/** Represents the request data used to generate images. */ -export interface ImageGenerationOptions { - /** - * The model name or Azure OpenAI model deployment name to use for image generation. If not specified, dall-e-2 will be - * inferred as a default. - */ - model?: string; - /** A description of the desired images. */ - prompt: string; - /** - * The number of images to generate. - * Dall-e-2 models support values between 1 and 10. - * Dall-e-3 models only support a value of 1. - */ - n?: number; - /** - * The desired dimensions for generated images. - * Dall-e-2 models support 256x256, 512x512, or 1024x1024. - * Dall-e-3 models support 1024x1024, 1792x1024, or 1024x1792. +/** The data provided during a tool outputs submission to resolve pending tool calls and allow the model to continue. */ +export interface ToolOutput { + /** The ID of the tool call being resolved, as provided in the tool calls of a required action from a run. */ + tool_call_id?: string; + /** The output from the tool to be submitted. */ + output?: string; +} + +/** The details used when creating and immediately running a new assistant thread. */ +export interface CreateAndRunThreadOptions { + /** The ID of the assistant for which the thread should be created. */ + assistant_id: string; + /** The details used to create the new thread. If no thread is provided, an empty one will be created. */ + thread?: AssistantThreadCreationOptions; + /** The overridden model that the assistant should use to run the thread. */ + model?: string | null; + /** The overridden system instructions the assistant should use to run the thread. */ + instructions?: string | null; + /** The overridden list of enabled tools the assistant should use to run the thread. */ + tools?: Array | null; + /** Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. */ + tool_resources?: UpdateToolResourcesOptions | null; + /** + * If `true`, returns a stream of events that happen during the Run as server-sent events, + * terminating when the Run enters a terminal state with a `data: [DONE]` message. */ - size?: ImageSize; - /** The format in which image generation response items should be presented. 
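// A sketch of resolving a run's required action with the ToolOutput shape above.
// The tool call ID is echoed from the run's required action, and the output is
// whatever the caller's own function produced; both values are placeholders.
const toolOutputs: ToolOutput[] = [
  {
    tool_call_id: "call_placeholder",
    output: JSON.stringify({ temperature_c: 21 }),
  },
];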
*/ - response_format?: ImageGenerationResponseFormat; - /** - * The desired image generation quality level to use. - * Only configurable with dall-e-3 models. - */ - quality?: ImageGenerationQuality; + stream?: boolean; /** - * The desired image generation style to use. - * Only configurable with dall-e-3 models. + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + * more random, while lower values like 0.2 will make it more focused and deterministic. */ - style?: ImageGenerationStyle; - /** A unique identifier representing your end-user, which can help to monitor and detect abuse. */ - user?: string; -} - -/** - * The configuration information for an embeddings request. - * Embeddings measure the relatedness of text strings and are commonly used for search, clustering, - * recommendations, and other similar scenarios. - */ -export interface EmbeddingsOptions { + temperature?: number | null; /** - * An identifier for the caller or end user of the operation. This may be used for tracking - * or rate-limiting purposes. + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + * comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. */ - user?: string; + top_p?: number | null; /** - * The model name to provide as part of this embeddings request. - * Not applicable to Azure OpenAI, where deployment information should be included in the Azure - * resource URI that's connected to. + * The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only + * the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. */ - model?: string; + max_prompt_tokens?: number | null; /** - * Input texts to get embeddings for, encoded as a an array of strings. - * Each input must not exceed 2048 tokens in length. - * - * Unless you are embedding code, we suggest replacing newlines (\\n) in your input with a single space, - * as we have observed inferior results when newlines are present. + * The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only + * the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens + * specified, the run will end with status `incomplete`. See `incomplete_details` for more info. */ - input: string[]; - /** The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. */ - dimensions?: number; + max_completion_tokens?: number | null; + /** The strategy to use for dropping messages as the context windows moves forward. */ + truncation_strategy?: TruncationObject | null; + /** Controls whether or not and which tool is called by the model. */ + tool_choice?: AssistantsApiToolChoiceOption | null; + /** Specifies the format that the model must output. */ + response_format?: AssistantsApiResponseFormatOption | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. 
Keys may be up to 64 characters in length and values may be up to 512 characters in length. */
+  metadata?: Record<string, string> | null;
 }
-/** A response containing error details. */
-export interface ErrorResponse {
-  /** The error object. */
-  error: OpenAIErrorModel;
+/** The expiration policy for a vector store. */
+export interface VectorStoreExpirationPolicy {
+  /** Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. */
+  anchor: VectorStoreExpirationPolicyAnchor;
+  /** The number of days after the anchor time that the vector store will expire. */
+  days: number;
 }
-/** The error object. */
-export interface OpenAIErrorModel {
-  /** A human-readable representation of the error. */
-  message: string;
-  /** The parameters of the error. */
-  param: string | null;
-  /** Type of the error. */
-  type: string | null;
-  /** The error code. */
-  code: string | null;
-}
-/** An abstract representation of a chat message as provided in a request. */
-export type ChatRequestMessage =
-  | ChatRequestMessageParent
-  | ChatRequestSystemMessage
-  | ChatRequestUserMessage
-  | ChatRequestAssistantMessage
-  | ChatRequestToolMessage
-  | ChatRequestFunctionMessage;
-/** An abstract representation of a structured content item within a chat message. */
-export type ChatMessageContentItem =
-  | ChatMessageContentItemParent
-  | ChatMessageTextContentItem
-  | ChatMessageImageContentItem;
-/**
- * An abstract representation of a tool call that must be resolved in a subsequent request to perform the requested
- * chat completion.
- */
-export type ChatCompletionsToolCall =
-  | ChatCompletionsToolCallParent
-  | ChatCompletionsFunctionToolCall;
-/**
- * A representation of configuration data for a single Azure OpenAI chat extension. This will be used by a chat
- * completions request that should use Azure OpenAI chat extensions to augment the response behavior.
- * The use of this configuration is compatible only with Azure OpenAI.
- */
-export type AzureChatExtensionConfiguration =
-  | AzureChatExtensionConfigurationParent
-  | AzureSearchChatExtensionConfiguration
-  | AzureMachineLearningIndexChatExtensionConfiguration
-  | AzureCosmosDBChatExtensionConfiguration
-  | ElasticsearchChatExtensionConfiguration
-  | PineconeChatExtensionConfiguration;
-/** The authentication options for Azure OpenAI On Your Data. */
-export type OnYourDataAuthenticationOptions =
-  | OnYourDataAuthenticationOptionsParent
-  | OnYourDataApiKeyAuthenticationOptions
-  | OnYourDataConnectionStringAuthenticationOptions
-  | OnYourDataKeyAndKeyIdAuthenticationOptions
-  | OnYourDataEncodedApiKeyAuthenticationOptions
-  | OnYourDataAccessTokenAuthenticationOptions
-  | OnYourDataSystemAssignedManagedIdentityAuthenticationOptions
-  | OnYourDataUserAssignedManagedIdentityAuthenticationOptions;
-/** An abstract representation of a vectorization source for Azure OpenAI On Your Data with vector search. */
-export type OnYourDataVectorizationSource =
-  | OnYourDataVectorizationSourceParent
-  | OnYourDataEndpointVectorizationSource
-  | OnYourDataDeploymentNameVectorizationSource
-  | OnYourDataModelIdVectorizationSource;
-/**
- * An abstract representation of a response format configuration usable by Chat Completions. Can be used to enable JSON
- * mode.
- */
-export type ChatCompletionsResponseFormat =
-  | ChatCompletionsResponseFormatParent
-  | ChatCompletionsTextResponseFormat
-  | ChatCompletionsJsonResponseFormat;
-/** An abstract representation of a tool that can be used by the model to improve a chat completions response.
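// A sketch of CreateAndRunThreadOptions: create a thread and start a run in a
// single request. The assistant ID and message text are placeholders.
const createAndRun: CreateAndRunThreadOptions = {
  assistant_id: "asst_placeholder",
  thread: {
    messages: [{ role: "user", content: "What changed in the Q3 report?" }],
  },
  stream: true, // consume run events as server-sent events
  temperature: 0.2,
};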
*/ -export type ChatCompletionsToolDefinition = - | ChatCompletionsToolDefinitionParent - | ChatCompletionsFunctionToolDefinition; -/** An abstract representation of an explicit, named tool selection to use for a chat completions request. */ -export type ChatCompletionsNamedToolSelection = - | ChatCompletionsNamedToolSelectionParent - | ChatCompletionsNamedFunctionToolSelection; -/** Alias for AudioTranscriptionFormat */ -export type AudioTranscriptionFormat = string | "json" | "verbose_json" | "text" | "srt" | "vtt"; -/** Alias for AudioTranslationFormat */ -export type AudioTranslationFormat = string | "json" | "verbose_json" | "text" | "srt" | "vtt"; -/** Alias for ChatRole */ -export type ChatRole = string | "system" | "assistant" | "user" | "function" | "tool"; -/** Alias for ChatMessageImageDetailLevel */ -export type ChatMessageImageDetailLevel = string | "auto" | "low" | "high"; -/** Alias for FunctionCallPreset */ -export type FunctionCallPreset = string | "auto" | "none"; -/** Alias for AzureChatExtensionType */ -export type AzureChatExtensionType = - | string - | "azure_search" - | "azure_ml_index" - | "azure_cosmos_db" - | "elasticsearch" - | "pinecone"; -/** Alias for OnYourDataAuthenticationType */ -export type OnYourDataAuthenticationType = - | string - | "api_key" - | "connection_string" - | "key_and_key_id" - | "encoded_api_key" - | "access_token" - | "system_assigned_managed_identity" - | "user_assigned_managed_identity"; -/** Alias for AzureSearchQueryType */ -export type AzureSearchQueryType = +/** Request object for creating a vector store. */ +export interface VectorStoreOptions { + /** A list of file IDs that the vector store should use. Useful for tools like `file_search` that can access files. */ + file_ids?: string[]; + /** The name of the vector store. */ + name?: string; + /** Details on when this vector store expires */ + expires_after?: VectorStoreExpirationPolicy; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record | null; +} + +/** Request object for updating a vector store. */ +export interface VectorStoreUpdateOptions { + /** The name of the vector store. */ + name?: string | null; + /** Details on when this vector store expires */ + expires_after?: VectorStoreExpirationPolicy | null; + /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */ + metadata?: Record | null; +} + +/** An abstract representation of an input tool definition that an assistant can use. */ +export type ToolDefinition = + | ToolDefinitionParent + | CodeInterpreterToolDefinition + | FileSearchToolDefinition + | FunctionToolDefinition; +/** An abstract representation of a single item of thread message content. */ +export type MessageContent = + | MessageContentParent + | MessageTextContent + | MessageImageFileContent; +/** An abstract representation of an annotation to text thread message content. 
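// A sketch of VectorStoreOptions using the expiration policy above: the store
// expires seven days after it was last active. Names and IDs are placeholders.
const vectorStoreOptions: VectorStoreOptions = {
  name: "report-search",
  file_ids: ["assistant-file-1", "assistant-file-2"],
  expires_after: { anchor: "last_active_at", days: 7 },
};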
*/ +export type MessageTextAnnotation = + | MessageTextAnnotationParent + | MessageTextFileCitationAnnotation + | MessageTextFilePathAnnotation; +/** Alias for CreateFileSearchToolResourceOptions */ +export type CreateFileSearchToolResourceOptions = + | string[] + | Array; +/** Alias for AssistantsApiResponseFormatMode */ +export type AssistantsApiResponseFormatMode = string; +/** Alias for ApiResponseFormat */ +export type ApiResponseFormat = string; +/** Alias for AssistantsApiResponseFormatOption */ +export type AssistantsApiResponseFormatOption = | string - | "simple" - | "semantic" - | "vector" - | "vector_simple_hybrid" - | "vector_semantic_hybrid"; -/** Alias for OnYourDataVectorizationSourceType */ -export type OnYourDataVectorizationSourceType = + | AssistantsApiResponseFormatMode + | AssistantsApiResponseFormat; +/** Alias for ListSortOrder */ +export type ListSortOrder = string; +/** Alias for MessageRole */ +export type MessageRole = string; +/** Alias for MessageAttachmentToolDefinition */ +export type MessageAttachmentToolDefinition = + | CodeInterpreterToolDefinition + | FileSearchToolDefinition; +/** Alias for MessageStatus */ +export type MessageStatus = string; +/** Alias for MessageIncompleteDetailsReason */ +export type MessageIncompleteDetailsReason = string; +/** Alias for TruncationStrategy */ +export type TruncationStrategy = string; +/** Alias for AssistantsApiToolChoiceOptionMode */ +export type AssistantsApiToolChoiceOptionMode = string; +/** Alias for AssistantsNamedToolChoiceType */ +export type AssistantsNamedToolChoiceType = string; +/** Alias for AssistantsApiToolChoiceOption */ +export type AssistantsApiToolChoiceOption = | string - | "endpoint" - | "deployment_name" - | "model_id"; -/** Alias for ElasticsearchQueryType */ -export type ElasticsearchQueryType = string | "simple" | "vector"; -/** Alias for ChatCompletionsToolSelectionPreset */ -export type ChatCompletionsToolSelectionPreset = string | "auto" | "none"; -/** Alias for ImageSize */ -export type ImageSize = string | "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792"; -/** Alias for ImageGenerationResponseFormat */ -export type ImageGenerationResponseFormat = string | "url" | "b64_json"; -/** Alias for ImageGenerationQuality */ -export type ImageGenerationQuality = string | "standard" | "hd"; -/** Alias for ImageGenerationStyle */ -export type ImageGenerationStyle = string | "natural" | "vivid"; + | AssistantsApiToolChoiceOptionMode + | AssistantsNamedToolChoice; +/** Alias for FilePurpose */ +export type FilePurpose = string; +/** Alias for VectorStoreExpirationPolicyAnchor */ +export type VectorStoreExpirationPolicyAnchor = string; +/** Alias for VectorStoreFileStatusFilter */ +export type VectorStoreFileStatusFilter = string; diff --git a/sdk/openai/openai/src/rest/openAIClient.ts b/sdk/openai/openai/src/rest/openAIClient.ts deleted file mode 100644 index 65a47268e5f4..000000000000 --- a/sdk/openai/openai/src/rest/openAIClient.ts +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -import { getClient, ClientOptions } from "@azure-rest/core-client"; -import { logger } from "../logger.js"; -import { TokenCredential, KeyCredential } from "@azure/core-auth"; -import { OpenAIContext } from "./clientDefinitions.js"; - -/** - * Initialize a new instance of `OpenAIContext` - * @param endpoint - Supported Cognitive Services endpoints (protocol and hostname, for example: - * https://westus.api.cognitive.microsoft.com). 
- * @param credentials - uniquely identify client credential
- * @param options - the parameter for all optional parameters
- */
-export default function createClient(
-  endpoint: string,
-  credentials: TokenCredential | KeyCredential,
-  options: ClientOptions = {},
-): OpenAIContext {
-  const baseUrl = options.baseUrl ?? `${endpoint}/openai`;
-  options.apiVersion = options.apiVersion ?? "2024-03-01-preview";
-  const userAgentInfo = `azsdk-js-openai-rest/1.0.0-beta.12`;
-  const userAgentPrefix =
-    options.userAgentOptions && options.userAgentOptions.userAgentPrefix
-      ? `${options.userAgentOptions.userAgentPrefix} ${userAgentInfo}`
-      : `${userAgentInfo}`;
-  options = {
-    ...options,
-    userAgentOptions: {
-      userAgentPrefix,
-    },
-    loggingOptions: {
-      logger: options.loggingOptions?.logger ?? logger.info,
-    },
-    credentials: {
-      scopes: options.credentials?.scopes ?? ["https://cognitiveservices.azure.com/.default"],
-      apiKeyHeaderName: options.credentials?.apiKeyHeaderName ?? "api-key",
-    },
-  };
-
-  const client = getClient(baseUrl, credentials, options) as OpenAIContext;
-
-  return client;
-}
diff --git a/sdk/openai/openai/src/rest/outputModels.ts b/sdk/openai/openai/src/rest/outputModels.ts
index afb9f19faeed..4b3934934b76 100644
--- a/sdk/openai/openai/src/rest/outputModels.ts
+++ b/sdk/openai/openai/src/rest/outputModels.ts
@@ -1,789 +1,940 @@
 // Copyright (c) Microsoft Corporation.
 // Licensed under the MIT license.

-import { ErrorModel } from "@azure-rest/core-client";
+/** An abstract representation of an input tool definition that an assistant can use. */
+export interface ToolDefinitionOutputParent {
+  type: string;
+}

-/** A specific deployment */
-export interface DeploymentOutput {
-  /** Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request. */
-  readonly deploymentId: string;
+/** The input definition information for a code interpreter tool as used to configure an assistant. */
+export interface CodeInterpreterToolDefinitionOutput
+  extends ToolDefinitionOutputParent {
+  /** The object type, which is always 'code_interpreter'. */
+  type: "code_interpreter";
 }

-/** Result information for an operation that transcribed spoken audio into written text. */
-export interface AudioTranscriptionOutput {
-  /** The transcribed text for the provided audio data. */
-  text: string;
-  /** The label that describes which operation type generated the accompanying response data. */
-  task?: AudioTaskLabelOutput;
-  /**
-   * The spoken language that was detected in the transcribed audio data.
-   * This is expressed as a two-letter ISO-639-1 language code like 'en' or 'fr'.
-   */
-  language?: string;
-  /** The total duration of the audio processed to produce accompanying transcription information. */
-  duration?: number;
-  /** A collection of information about the timing, probabilities, and other detail of each processed audio segment. */
-  segments?: Array<AudioTranscriptionSegmentOutput>;
+/** The input definition information for a file search tool as used to configure an assistant. */
+export interface FileSearchToolDefinitionOutput
+  extends ToolDefinitionOutputParent {
+  /** The object type, which is always 'file_search'. */
+  type: "file_search";
 }

-/**
- * Extended information about a single segment of transcribed audio data.
- * Segments generally represent roughly 5-10 seconds of speech. Segment boundaries typically occur between words but not
- * necessarily sentences.
- */
-export interface AudioTranscriptionSegmentOutput {
-  /** The 0-based index of this segment within a transcription. */
-  id: number;
-  /** The time at which this segment started relative to the beginning of the transcribed audio. */
-  start: number;
-  /** The time at which this segment ended relative to the beginning of the transcribed audio. */
-  end: number;
-  /** The transcribed text that was part of this audio segment. */
-  text: string;
-  /** The temperature score associated with this audio segment. */
-  temperature: number;
-  /** The average log probability associated with this audio segment. */
-  avg_logprob: number;
-  /** The compression ratio of this audio segment. */
-  compression_ratio: number;
-  /** The probability of no speech detection within this audio segment. */
-  no_speech_prob: number;
-  /** The token IDs matching the transcribed text in this audio segment. */
-  tokens: number[];
-  /**
-   * The seek position associated with the processing of this audio segment.
-   * Seek positions are expressed as hundredths of seconds.
-   * The model may process several segments from a single seek position, so while the seek position will never represent
-   * a later time than the segment's start, the segment's start may represent a significantly later time than the
-   * segment's associated seek position.
-   */
-  seek: number;
+/** The input definition information for a function tool as used to configure an assistant. */
+export interface FunctionToolDefinitionOutput
+  extends ToolDefinitionOutputParent {
+  /** The object type, which is always 'function'. */
+  type: "function";
+  /** The definition of the concrete function that the function tool should call. */
+  function: FunctionDefinitionOutput;
 }

-/** Result information for an operation that translated spoken audio into written text. */
-export interface AudioTranslationOutput {
-  /** The translated text for the provided audio data. */
-  text: string;
-  /** The label that describes which operation type generated the accompanying response data. */
-  task?: AudioTaskLabelOutput;
-  /**
-   * The spoken language that was detected in the translated audio data.
-   * This is expressed as a two-letter ISO-639-1 language code like 'en' or 'fr'.
-   */
-  language?: string;
-  /** The total duration of the audio processed to produce accompanying translation information. */
-  duration?: number;
-  /** A collection of information about the timing, probabilities, and other detail of each processed audio segment. */
-  segments?: Array<AudioTranslationSegmentOutput>;
+/** The input definition information for a function. */
+export interface FunctionDefinitionOutput {
+  /** The name of the function to be called. */
+  name: string;
+  /** A description of what the function does, used by the model to choose when and how to call the function. */
+  description?: string;
+  /** The parameters the function accepts, described as a JSON Schema object. */
+  parameters: any;
 }
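Since every concrete tool definition narrows the abstract parent by its literal `type`, a type predicate is enough to pull the function tools (and their JSON Schema parameters) out of a mixed list. A short sketch; the `ToolDefinitionOutput` union itself is declared elsewhere in this file and is assumed here to be the usual parent-plus-variants union:

import type {
  FunctionToolDefinitionOutput,
  FunctionDefinitionOutput,
  ToolDefinitionOutput,
} from "./outputModels.js";

// Narrows a mixed tool list down to its function definitions.
function functionTools(
  tools: ToolDefinitionOutput[],
): FunctionDefinitionOutput[] {
  return tools
    .filter(
      (tool): tool is FunctionToolDefinitionOutput => tool.type === "function",
    )
    .map((tool) => tool.function);
}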
 /**
- * Extended information about a single segment of translated audio data.
- * Segments generally represent roughly 5-10 seconds of speech. Segment boundaries typically occur between words but not
- * necessarily sentences.
+ * An object describing the expected output of the model. If `json_object` only `function` type `tools` are allowed to be passed to the Run.
+ * If `text` the model can return text or any value needed.
  */
-export interface AudioTranslationSegmentOutput {
-  /** The 0-based index of this segment within a translation. */
-  id: number;
-  /** The time at which this segment started relative to the beginning of the translated audio. */
-  start: number;
-  /** The time at which this segment ended relative to the beginning of the translated audio. */
-  end: number;
-  /** The translated text that was part of this audio segment. */
-  text: string;
-  /** The temperature score associated with this audio segment. */
-  temperature: number;
-  /** The average log probability associated with this audio segment. */
-  avg_logprob: number;
-  /** The compression ratio of this audio segment. */
-  compression_ratio: number;
-  /** The probability of no speech detection within this audio segment. */
-  no_speech_prob: number;
-  /** The token IDs matching the translated text in this audio segment. */
-  tokens: number[];
-  /**
-   * The seek position associated with the processing of this audio segment.
-   * Seek positions are expressed as hundredths of seconds.
-   * The model may process several segments from a single seek position, so while the seek position will never represent
-   * a later time than the segment's start, the segment's start may represent a significantly later time than the
-   * segment's associated seek position.
-   */
-  seek: number;
+export interface AssistantsApiResponseFormatOutput {
+  /** Must be one of `text` or `json_object`. */
+  type?: ApiResponseFormatOutput;
 }

-/**
- * Representation of the response data from a completions request.
- * Completions support a wide variety of tasks and generate text that continues from or "completes"
- * provided prompt data.
- */
-export interface CompletionsOutput {
-  /** A unique identifier associated with this completions response. */
+/** Represents an assistant that can call the model and use tools. */
+export interface AssistantOutput {
+  /** The identifier, which can be referenced in API endpoints. */
   id: string;
+  /** The object type, which is always assistant. */
+  object: "assistant";
+  /** The Unix timestamp, in seconds, representing when this object was created. */
+  created_at: number;
+  /** The name of the assistant. */
+  name: string | null;
+  /** The description of the assistant. */
+  description: string | null;
+  /** The ID of the model to use. */
+  model: string;
+  /** The system instructions for the assistant to use. */
+  instructions: string | null;
+  /** The collection of tools enabled for the assistant. */
+  tools: Array<ToolDefinitionOutput>;
   /**
-   * The first timestamp associated with generation activity for this completions response,
-   * represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970.
+   * A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter`
+   * tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
    */
-  created: number;
+  tool_resources: ToolResourcesOutput | null;
   /**
-   * Content filtering results for zero or more prompts in the request. In a streaming request,
-   * results for different prompts may arrive at different times or in different orders.
+   * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random,
+   * while lower values like 0.2 will make it more focused and deterministic.
    */
-  prompt_filter_results?: Array<ContentFilterResultsForPromptOutput>;
+  temperature: number | null;
   /**
-   * The collection of completions choices associated with this completions response.
-   * Generally, `n` choices are generated per provided prompt with a default value of 1.
-   * Token limits and other settings may limit the number of choices generated.
+   * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+   * So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+   *
+   * We generally recommend altering this or temperature but not both.
    */
-  choices: Array<ChoiceOutput>;
-  /** Usage information for tokens processed and generated as part of this completions operation. */
-  usage: CompletionsUsageOutput;
+  top_p: number | null;
+  /** The response format of the tool calls used by this assistant. */
+  response_format?: AssistantsApiResponseFormatOptionOutput | null;
+  /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */
+  metadata: Record<string, string> | null;
 }

-/** Content filtering results for a single prompt in the request. */
-export interface ContentFilterResultsForPromptOutput {
-  /** The index of this prompt in the set of prompt results */
-  prompt_index: number;
-  /** Content filtering results for this prompt */
-  content_filter_results: ContentFilterResultDetailsForPromptOutput;
+/**
+ * A set of resources that are used by the assistant's tools. The resources are specific to the type of
+ * tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search`
+ * tool requires a list of vector store IDs.
+ */
+export interface ToolResourcesOutput {
+  /** Resources to be used by the `code_interpreter` tool consisting of file IDs. */
+  code_interpreter?: CodeInterpreterToolResourceOutput;
+  /** Resources to be used by the `file_search` tool consisting of vector store IDs. */
+  file_search?: FileSearchToolResourceOutput;
 }

-/** Information about content filtering evaluated against input data to Azure OpenAI. */
-export interface ContentFilterResultDetailsForPromptOutput {
-  /**
-   * Describes language related to anatomical organs and genitals, romantic relationships,
-   * acts portrayed in erotic or affectionate terms, physical sexual acts, including
-   * those portrayed as an assault or a forced sexual violent act against one’s will,
-   * prostitution, pornography, and abuse.
-   */
-  sexual?: ContentFilterResultOutput;
-  /**
-   * Describes language related to physical actions intended to hurt, injure, damage, or
-   * kill someone or something; describes weapons, etc.
-   */
-  violence?: ContentFilterResultOutput;
-  /**
-   * Describes language attacks or uses that include pejorative or discriminatory language
-   * with reference to a person or identity group on the basis of certain differentiating
-   * attributes of these groups including but not limited to race, ethnicity, nationality,
-   * gender identity and expression, sexual orientation, religion, immigration status, ability
-   * status, personal appearance, and body size.
-   */
-  hate?: ContentFilterResultOutput;
-  /**
-   * Describes language related to physical actions intended to purposely hurt, injure,
-   * or damage one’s body, or kill oneself.
-   */
-  self_harm?: ContentFilterResultOutput;
-  /** Describes whether profanity was detected. */
-  profanity?: ContentFilterDetectionResultOutput;
-  /** Describes detection results against configured custom blocklists. */
-  custom_blocklists?: Array<ContentFilterBlocklistIdResultOutput>;
+/** A set of resources that are used by the `code_interpreter` tool. */
+export interface CodeInterpreterToolResourceOutput {
+  /**
+   * A list of file IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files
+   * associated with the tool.
+   */
-  error?: ErrorModel;
-  /** Whether a jailbreak attempt was detected in the prompt. */
-  jailbreak?: ContentFilterDetectionResultOutput;
+  file_ids: string[];
 }

-/** Information about filtered content severity level and if it has been filtered or not. */
-export interface ContentFilterResultOutput {
-  /** Ratings for the intensity and risk level of filtered content. */
-  severity: ContentFilterSeverityOutput;
-  /** A value indicating whether or not the content has been filtered. */
-  filtered: boolean;
+/** A set of resources that are used by the `file_search` tool. */
+export interface FileSearchToolResourceOutput {
+  /**
+   * The ID of the vector store attached to this assistant. There can be a maximum of 1 vector
+   * store attached to the assistant.
+   */
+  vector_store_ids?: string[];
 }

-/** Represents the outcome of a detection operation performed by content filtering. */
-export interface ContentFilterDetectionResultOutput {
-  /** A value indicating whether or not the content has been filtered. */
-  filtered: boolean;
-  /** A value indicating whether detection occurred, irrespective of severity or whether the content was filtered. */
-  detected: boolean;
+/** The response data for a requested list of items. */
+export interface OpenAIPageableListOfAssistantOutput {
+  /** The object type, which is always list. */
+  object: "list";
+  /** The requested list of items. */
+  data: Array<AssistantOutput>;
+  /** The first ID represented in this list. */
+  first_id: string;
+  /** The last ID represented in this list. */
+  last_id: string;
+  /** A value indicating whether there are additional values available not captured in this list. */
+  has_more: boolean;
 }

-/** Represents the outcome of an evaluation against a custom blocklist as performed by content filtering. */
-export interface ContentFilterBlocklistIdResultOutput {
-  /** The ID of the custom blocklist evaluated. */
+/** The status of an assistant deletion operation. */
+export interface AssistantDeletionStatusOutput {
+  /** The ID of the resource specified for deletion. */
   id: string;
-  /** A value indicating whether or not the content has been filtered. */
-  filtered: boolean;
+  /** A value indicating whether deletion was successful. */
+  deleted: boolean;
+  /** The object type, which is always 'assistant.deleted'. */
+  object: "assistant.deleted";
 }
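The pageable list shape above is a cursor page: while `has_more` is true, the `last_id` of one page is passed as the `after` query parameter of the next request. A sketch of that loop, assuming a hypothetical `fetchAssistantsPage` transport helper; only the list shape itself comes from this file:

import type {
  AssistantOutput,
  OpenAIPageableListOfAssistantOutput,
} from "./outputModels.js";

// Hypothetical transport helper; any function returning one page works.
declare function fetchAssistantsPage(
  after?: string,
): Promise<OpenAIPageableListOfAssistantOutput>;

async function listAllAssistants(): Promise<AssistantOutput[]> {
  const assistants: AssistantOutput[] = [];
  let after: string | undefined;
  for (;;) {
    const page = await fetchAssistantsPage(after);
    assistants.push(...page.data);
    if (!page.has_more) {
      break;
    }
    after = page.last_id; // cursor for the next page
  }
  return assistants;
}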
-/**
- * The representation of a single prompt completion as part of an overall completions request.
- * Generally, `n` choices are generated per provided prompt with a default value of 1.
- * Token limits and other settings may limit the number of choices generated.
- */
-export interface ChoiceOutput {
-  /** The generated text for a given completions prompt. */
-  text: string;
-  /** The ordered index associated with this completions choice. */
-  index: number;
-  /**
-   * Information about the content filtering category (hate, sexual, violence, self_harm), if it
-   * has been detected, as well as the severity level (very_low, low, medium, high-scale that
-   * determines the intensity and risk level of harmful content) and if it has been filtered or not.
-   */
-  content_filter_results?: ContentFilterResultsForChoiceOutput;
-  /** The log probabilities model for tokens associated with this completions choice. */
-  logprobs: CompletionsLogProbabilityModelOutput | null;
-  /** Reason for finishing */
-  finish_reason: CompletionsFinishReasonOutput | null;
+/** This describes to which tools a file has been attached. */
+export interface MessageAttachmentOutput {
+  /** The ID of the file to attach to the message. */
+  file_id: string;
+  /** The tools to add to this file. */
+  tools: MessageAttachmentToolDefinitionOutput[];
 }

-/** Information about content filtering evaluated against generated model output. */
-export interface ContentFilterResultsForChoiceOutput {
-  /**
-   * Describes language related to anatomical organs and genitals, romantic relationships,
-   * acts portrayed in erotic or affectionate terms, physical sexual acts, including
-   * those portrayed as an assault or a forced sexual violent act against one’s will,
-   * prostitution, pornography, and abuse.
-   */
-  sexual?: ContentFilterResultOutput;
-  /**
-   * Describes language related to physical actions intended to hurt, injure, damage, or
-   * kill someone or something; describes weapons, etc.
-   */
-  violence?: ContentFilterResultOutput;
-  /**
-   * Describes language attacks or uses that include pejorative or discriminatory language
-   * with reference to a person or identity group on the basis of certain differentiating
-   * attributes of these groups including but not limited to race, ethnicity, nationality,
-   * gender identity and expression, sexual orientation, religion, immigration status, ability
-   * status, personal appearance, and body size.
-   */
-  hate?: ContentFilterResultOutput;
-  /**
-   * Describes language related to physical actions intended to purposely hurt, injure,
-   * or damage one’s body, or kill oneself.
-   */
-  self_harm?: ContentFilterResultOutput;
-  /** Describes whether profanity was detected. */
-  profanity?: ContentFilterDetectionResultOutput;
-  /** Describes detection results against configured custom blocklists. */
-  custom_blocklists?: Array<ContentFilterBlocklistIdResultOutput>;
+/** Information about a single thread associated with an assistant. */
+export interface AssistantThreadOutput {
+  /** The identifier, which can be referenced in API endpoints. */
+  id: string;
+  /** The object type, which is always 'thread'. */
+  object: "thread";
+  /** The Unix timestamp, in seconds, representing when this object was created. */
+  created_at: number;
   /**
-   * Describes an error returned if the content filtering system is
-   * down or otherwise unable to complete the operation in time.
+   * A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type
+   * of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list
+   * of vector store IDs.
    */
-  error?: ErrorModel;
-  /** Information about detection of protected text material. */
-  protected_material_text?: ContentFilterDetectionResultOutput;
-  /** Information about detection of protected code material. */
-  protected_material_code?: ContentFilterCitedDetectionResultOutput;
-}
-
-/** Represents the outcome of a detection operation against protected resources as performed by content filtering. */
-export interface ContentFilterCitedDetectionResultOutput {
-  /** A value indicating whether or not the content has been filtered. */
-  filtered: boolean;
-  /** A value indicating whether detection occurred, irrespective of severity or whether the content was filtered. */
-  detected: boolean;
-  /** The internet location associated with the detection. */
-  URL?: string;
-  /** The license description associated with the detection. */
-  license: string;
-}
-
-/** Representation of a log probabilities model for a completions generation. */
-export interface CompletionsLogProbabilityModelOutput {
-  /** The textual forms of tokens evaluated in this probability model. */
-  tokens: string[];
-  /** A collection of log probability values for the tokens in this completions data. */
-  token_logprobs: (number | null)[];
-  /** A mapping of tokens to maximum log probability values in this completions data. */
-  top_logprobs: Record<string, number>[];
-  /** The text offsets associated with tokens in this completions data. */
-  text_offset: number[];
+  tool_resources: ToolResourcesOutput | null;
+  /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */
+  metadata: Record<string, string> | null;
 }

-/**
- * Representation of the token counts processed for a completions request.
- * Counts consider all tokens across prompts, choices, choice alternates, best_of generations, and
- * other consumers.
- */
-export interface CompletionsUsageOutput {
-  /** The number of tokens generated across all completions emissions. */
-  completion_tokens: number;
-  /** The number of tokens in the provided prompts for the completions request. */
-  prompt_tokens: number;
-  /** The total number of tokens processed for the completions request and response. */
-  total_tokens: number;
+/** The status of a thread deletion operation. */
+export interface ThreadDeletionStatusOutput {
+  /** The ID of the resource specified for deletion. */
+  id: string;
+  /** A value indicating whether deletion was successful. */
+  deleted: boolean;
+  /** The object type, which is always 'thread.deleted'. */
+  object: "thread.deleted";
 }

-/**
- * An abstract representation of a tool call that must be resolved in a subsequent request to perform the requested
- * chat completion.
- */
-export interface ChatCompletionsToolCallOutputParent {
-  /** The ID of the tool call. */
+/** A single, existing message within an assistant thread. */
+export interface ThreadMessageOutput {
+  /** The identifier, which can be referenced in API endpoints. */
   id: string;
+  /** The object type, which is always 'thread.message'. */
+  object: "thread.message";
+  /** The Unix timestamp, in seconds, representing when this object was created. */
+  created_at: number;
+  /** The ID of the thread that this message belongs to. */
+  thread_id: string;
+  /** The status of the message. */
+  status: MessageStatusOutput;
+  /** On an incomplete message, details about why the message is incomplete. */
+  incomplete_details: MessageIncompleteDetailsOutput | null;
+  /** The Unix timestamp (in seconds) for when the message was completed. */
+  completed_at: number | null;
+  /** The Unix timestamp (in seconds) for when the message was marked as incomplete. */
+  incomplete_at: number | null;
+  /** The role associated with the assistant thread message. */
+  role: MessageRoleOutput;
+  /** The list of content items associated with the assistant thread message. */
+  content: Array<MessageContentOutput>;
+  /** If applicable, the ID of the assistant that authored this message. */
+  assistant_id: string | null;
+  /** If applicable, the ID of the run associated with the authoring of this message. */
+  run_id: string | null;
+  /** A list of files attached to the message, and the tools they were added to. */
+  attachments: Array<MessageAttachmentOutput> | null;
+  /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */
+  metadata: Record<string, string> | null;
+}
+
+/** Information providing additional detail about a message entering an incomplete status. */
+export interface MessageIncompleteDetailsOutput {
+  /** The provided reason describing why the message was marked as incomplete. */
+  reason: MessageIncompleteDetailsReasonOutput;
+}
+
+/** An abstract representation of a single item of thread message content. */
+export interface MessageContentOutputParent {
   type: string;
 }

-/**
- * A tool call to a function tool, issued by the model in evaluation of a configured function tool, that represents
- * a function invocation needed for a subsequent chat completions request to resolve.
- */
-export interface ChatCompletionsFunctionToolCallOutput extends ChatCompletionsToolCallOutputParent {
-  /** The type of tool call, in this case always 'function'. */
-  type: "function";
-  /** The details of the function invocation requested by the tool call. */
-  function: FunctionCallOutput;
+/** A representation of a textual item of thread message content. */
+export interface MessageTextContentOutput extends MessageContentOutputParent {
+  /** The object type, which is always 'text'. */
+  type: "text";
+  /** The text and associated annotations for this thread message content item. */
+  text: MessageTextDetailsOutput;
 }

-/** The name and arguments of a function that should be called, as generated by the model. */
-export interface FunctionCallOutput {
-  /** The name of the function to call. */
-  name: string;
-  /**
-   * The arguments to call the function with, as generated by the model in JSON format.
-   * Note that the model does not always generate valid JSON, and may hallucinate parameters
-   * not defined by your function schema. Validate the arguments in your code before calling
-   * your function.
-   */
-  arguments: string;
+/** The text and associated annotations for a single item of assistant thread message content. */
+export interface MessageTextDetailsOutput {
+  /** The text data. */
+  value: string;
+  /** A list of annotations associated with this text. */
+  annotations: Array<MessageTextAnnotationOutput>;
 }
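Message content and its annotations are discriminated on their `type` fields, so flattening a thread message to plain text is a matter of keeping the `text` items and folding in any `file_citation` annotations. A sketch against the models above; because the unions also include the open-ended parents, the narrowed casts are spelled out explicitly:

import type {
  ThreadMessageOutput,
  MessageTextContentOutput,
  MessageTextFileCitationAnnotationOutput,
} from "./outputModels.js";

// Collects the plain text of a message, noting file citations inline.
function messageText(message: ThreadMessageOutput): string {
  const parts: string[] = [];
  for (const item of message.content) {
    if (item.type !== "text") {
      continue; // e.g. image_file content carries no text
    }
    const { text } = item as MessageTextContentOutput;
    let value = text.value;
    for (const annotation of text.annotations) {
      if (annotation.type === "file_citation") {
        const citation = annotation as MessageTextFileCitationAnnotationOutput;
        value += ` [cites file ${citation.file_citation.file_id}]`;
      }
    }
    parts.push(value);
  }
  return parts.join("\n");
}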
-/**
- * Representation of the response data from a chat completions request.
- * Completions support a wide variety of tasks and generate text that continues from or "completes"
- * provided prompt data.
- */
-export interface ChatCompletionsOutput {
-  /** A unique identifier associated with this chat completions response. */
-  id: string;
-  /** The current model used for the chat completions request. */
-  model: string;
-  /**
-   * The first timestamp associated with generation activity for this completions response,
-   * represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970.
-   */
-  created: number;
-  /**
-   * The collection of completions choices associated with this completions response.
-   * Generally, `n` choices are generated per provided prompt with a default value of 1.
-   * Token limits and other settings may limit the number of choices generated.
-   */
-  choices: Array<ChatChoiceOutput>;
-  /**
-   * Content filtering results for zero or more prompts in the request. In a streaming request,
-   * results for different prompts may arrive at different times or in different orders.
-   */
-  prompt_filter_results?: Array<ContentFilterResultsForPromptOutput>;
-  /**
-   * Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that
-   * might impact determinism.
-   */
-  system_fingerprint?: string;
-  /** Usage information for tokens processed and generated as part of this completions operation. */
-  usage: CompletionsUsageOutput;
-}
-
-/**
- * The representation of a single prompt completion as part of an overall chat completions request.
- * Generally, `n` choices are generated per provided prompt with a default value of 1.
- * Token limits and other settings may limit the number of choices generated.
- */
-export interface ChatChoiceOutput {
-  /** The chat message for a given chat completions prompt. */
-  message?: ChatResponseMessageOutput;
-  /** The log probability information for this choice, as enabled via the 'logprobs' request option. */
-  logprobs: ChatChoiceLogProbabilityInfoOutput | null;
-  /** The ordered index associated with this chat completions choice. */
-  index: number;
-  /** The reason that this chat completions choice completed its generated. */
-  finish_reason: CompletionsFinishReasonOutput | null;
+/** An abstract representation of an annotation to text thread message content. */
+export interface MessageTextAnnotationOutputParent {
+  /** The textual content associated with this text annotation item. */
+  text: string;
+  type: string;
+}
+
+/** A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the 'file_search' tool to search files. */
+export interface MessageTextFileCitationAnnotationOutput
+  extends MessageTextAnnotationOutputParent {
+  /** The object type, which is always 'file_citation'. */
+  type: "file_citation";
   /**
-   * The reason the model stopped generating tokens, together with any applicable details.
-   * This structured representation replaces 'finish_reason' for some models.
+   * A citation within the message that points to a specific quote from a specific file.
+   * Generated when the assistant uses the "file_search" tool to search files.
    */
-  finish_details?: ChatFinishDetailsOutput;
-  /** The delta message content for a streaming response. */
-  delta?: ChatResponseMessageOutput;
-  /**
-   * Information about the content filtering category (hate, sexual, violence, self_harm), if it
-   * has been detected, as well as the severity level (very_low, low, medium, high-scale that
-   * determines the intensity and risk level of harmful content) and if it has been filtered or not.
-   */
-  content_filter_results?: ContentFilterResultsForChoiceOutput;
-  /**
-   * Represents the output results of Azure OpenAI enhancements to chat completions, as configured via the matching input
-   * provided in the request. This supplementary information is only available when using Azure OpenAI and only when the
-   * request is configured to use enhancements.
-   */
-  enhancements?: AzureChatEnhancementsOutput;
+  file_citation: MessageTextFileCitationDetailsOutput;
+  /** The first text index associated with this text annotation. */
+  start_index?: number;
+  /** The last text index associated with this text annotation. */
+  end_index?: number;
 }

-/** A representation of a chat message as received in a response. */
-export interface ChatResponseMessageOutput {
-  /** The chat role associated with the message. */
-  role: ChatRoleOutput;
-  /** The content of the message. */
-  content: string | null;
-  /**
-   * The tool calls that must be resolved and have their outputs appended to subsequent input messages for the chat
-   * completions request to resolve as configured.
-   */
-  tool_calls?: Array<ChatCompletionsToolCallOutput>;
-  /**
-   * The function call that must be resolved and have its output appended to subsequent input messages for the chat
-   * completions request to resolve as configured.
-   */
-  function_call?: FunctionCallOutput;
-  /**
-   * If Azure OpenAI chat extensions are configured, this array represents the incremental steps performed by those
-   * extensions while processing the chat completions request.
-   */
-  context?: AzureChatExtensionsMessageContextOutput;
+/** A representation of a file-based text citation, as used in a file-based annotation of text thread message content. */
+export interface MessageTextFileCitationDetailsOutput {
+  /** The ID of the file associated with this citation. */
+  file_id: string;
+  /** The specific quote cited in the associated file. */
+  quote: string;
+}
+
+/** A citation within the message that points to a file located at a specific path. */
+export interface MessageTextFilePathAnnotationOutput
+  extends MessageTextAnnotationOutputParent {
+  /** The object type, which is always 'file_path'. */
+  type: "file_path";
+  /** A URL for the file that's generated when the assistant used the code_interpreter tool to generate a file. */
+  file_path: MessageTextFilePathDetailsOutput;
+  /** The first text index associated with this text annotation. */
+  start_index?: number;
+  /** The last text index associated with this text annotation. */
+  end_index?: number;
+}
+
+/** An encapsulation of the ID of the file referenced by a file path annotation of text thread message content. */
+export interface MessageTextFilePathDetailsOutput {
+  /** The ID of the specific file that the citation is from. */
+  file_id: string;
+}
+
+/** A representation of image file content in a thread message. */
+export interface MessageImageFileContentOutput
+  extends MessageContentOutputParent {
+  /** The object type, which is always 'image_file'. */
+  type: "image_file";
+  /** The image file for this thread message content item. */
+  image_file: MessageImageFileDetailsOutput;
+}
+
+/** An image reference, as represented in thread message content. */
+export interface MessageImageFileDetailsOutput {
+  /** The ID for the file associated with this image. */
+  file_id: string;
+}
+
+/** The response data for a requested list of items. */
+export interface OpenAIPageableListOfThreadMessageOutput {
+  /** The object type, which is always list. */
+  object: "list";
+  /** The requested list of items. */
+  data: Array<ThreadMessageOutput>;
+  /** The first ID represented in this list. */
+  first_id: string;
+  /** The last ID represented in this list. */
+  last_id: string;
+  /** A value indicating whether there are additional values available not captured in this list. */
+  has_more: boolean;
+}

 /**
- * A representation of the additional context information available when Azure OpenAI chat extensions are involved
- * in the generation of a corresponding chat completions response. This context information is only populated when
- * using an Azure OpenAI request configured to use a matching extension.
+ * Controls for how a thread will be truncated prior to the run. Use this to control the initial
+ * context window of the run.
  */
-export interface AzureChatExtensionsMessageContextOutput {
+export interface TruncationObjectOutput {
   /**
-   * The contextual information associated with the Azure chat extensions used for a chat completions request.
-   * These messages describe the data source retrievals, plugin invocations, and other intermediate steps taken in the
-   * course of generating a chat completions response that was augmented by capabilities from Azure OpenAI chat
-   * extensions.
+   * The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will
+   * be truncated to the `last_messages` count most recent messages in the thread. When set to `auto`, messages in the middle of the thread
+   * will be dropped to fit the context length of the model, `max_prompt_tokens`.
    */
-  citations?: Array<AzureChatExtensionDataSourceResponseCitationOutput>;
-  /** The detected intent from the chat history, used to pass to the next turn to carry over the context. */
-  intent?: string;
+  type: TruncationStrategyOutput;
+  /** The number of most recent messages from the thread when constructing the context for the run. */
+  last_messages?: number | null;
 }

-/**
- * A single instance of additional context information available when Azure OpenAI chat extensions are involved
- * in the generation of a corresponding chat completions response. This context information is only populated when
- * using an Azure OpenAI request configured to use a matching extension.
- */
-export interface AzureChatExtensionDataSourceResponseCitationOutput {
-  /** The content of the citation. */
-  content: string;
-  /** The title of the citation. */
-  title?: string;
-  /** The URL of the citation. */
-  url?: string;
-  /** The file path of the citation. */
-  filepath?: string;
-  /** The chunk ID of the citation. */
-  chunk_id?: string;
-}
-
-/** Log probability information for a choice, as requested via 'logprobs' and 'top_logprobs'. */
-export interface ChatChoiceLogProbabilityInfoOutput {
-  /** The list of log probability information entries for the choice's message content tokens, as requested via the 'logprobs' option. */
-  content: Array<ChatTokenLogProbabilityResultOutput> | null;
-}
-
-/** A representation of the log probability information for a single content token, including a list of most likely tokens if 'top_logprobs' were requested. */
-export interface ChatTokenLogProbabilityResultOutput {
-  /** The message content token. */
-  token: string;
-  /** The log probability of the message content token. */
-  logprob: number;
-  /** A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token. */
-  bytes: number[] | null;
-  /** The list of most likely tokens and their log probability information, as requested via 'top_logprobs'. */
-  top_logprobs: Array<ChatTokenLogProbabilityInfoOutput> | null;
-}
-
-/** A representation of the log probability information for a single message content token. */
-export interface ChatTokenLogProbabilityInfoOutput {
-  /** The message content token. */
-  token: string;
-  /** The log probability of the message content token. */
-  logprob: number;
-  /** A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token. */
-  bytes: number[] | null;
-}
-
-/** An abstract representation of structured information about why a chat completions response terminated. */
-export interface ChatFinishDetailsOutputParent {
+/** Specifies a tool the model should use. Use to force the model to call a specific tool. */
+export interface AssistantsNamedToolChoiceOutput {
+  /** The type of tool. If type is `function`, the function name must be set. */
+  type: AssistantsNamedToolChoiceTypeOutput;
+  /** The name of the function to call */
+  function?: FunctionNameOutput;
+}
+
+/** The function name that will be used, if using the `function` tool */
+export interface FunctionNameOutput {
+  /** The name of the function to call */
+  name: string;
+}
+
+/** Data representing a single evaluation run of an assistant thread. */
+export interface ThreadRunOutput {
+  /** The identifier, which can be referenced in API endpoints. */
+  id: string;
+  /** The object type, which is always 'thread.run'. */
+  object: "thread.run";
+  /** The ID of the thread associated with this run. */
+  thread_id: string;
+  /** The ID of the assistant associated with the thread this run was performed against. */
+  assistant_id: string;
+  /** The status of the assistant thread run. */
+  status: RunStatusOutput;
+  /** The details of the action required for the assistant thread run to continue. */
+  required_action?: RequiredActionOutput | null;
+  /** The last error, if any, encountered by this assistant thread run. */
+  last_error: RunErrorOutput | null;
+  /** The ID of the model to use. */
+  model: string;
+  /** The overridden system instructions used for this assistant thread run. */
+  instructions: string;
+  /** The overridden enabled tools used for this assistant thread run. */
+  tools: Array<ToolDefinitionOutput>;
+  /** The Unix timestamp, in seconds, representing when this object was created. */
+  created_at: number;
+  /** The Unix timestamp, in seconds, representing when this item expires. */
+  expires_at: number | null;
+  /** The Unix timestamp, in seconds, representing when this item was started. */
+  started_at: number | null;
+  /** The Unix timestamp, in seconds, representing when this completed. */
+  completed_at: number | null;
+  /** The Unix timestamp, in seconds, representing when this was cancelled. */
+  cancelled_at: number | null;
+  /** The Unix timestamp, in seconds, representing when this failed. */
+  failed_at: number | null;
+  /** Details on why the run is incomplete. Will be `null` if the run is not incomplete. */
+  incomplete_details: IncompleteRunDetailsOutput | null;
+  /** Usage statistics related to the run. This value will be `null` if the run is not yet in a terminal state (e.g. while it is `in_progress` or `queued`). */
+  usage: RunCompletionUsageOutput | null;
+  /** The sampling temperature used for this run. If not set, defaults to 1. */
+  temperature?: number | null;
+  /** The nucleus sampling value used for this run. If not set, defaults to 1. */
+  top_p?: number | null;
+  /** The maximum number of prompt tokens specified to have been used over the course of the run. */
+  max_prompt_tokens: number | null;
+  /** The maximum number of completion tokens specified to have been used over the course of the run. */
+  max_completion_tokens: number | null;
+  /** The strategy to use for dropping messages as the context window moves forward. */
+  truncation_strategy: TruncationObjectOutput | null;
+  /** Controls whether or not and which tool is called by the model. */
+  tool_choice: AssistantsApiToolChoiceOptionOutput | null;
+  /** The response format of the tool calls used in this run. */
+  response_format: AssistantsApiResponseFormatOptionOutput | null;
+  /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */
+  metadata: Record<string, string> | null;
+}
+
+/** An abstract representation of a required action for an assistant thread run to continue. */
+export interface RequiredActionOutputParent {
   type: string;
 }

-/** A structured representation of a stop reason that signifies natural termination by the model. */
-export interface StopFinishDetailsOutput extends ChatFinishDetailsOutputParent {
-  /** The object type, which is always 'stop' for this object. */
-  type: "stop";
-  /** The token sequence that the model terminated with. */
-  stop: string;
+/** The details for required tool calls that must be submitted for an assistant thread run to continue. */
+export interface SubmitToolOutputsActionOutput
+  extends RequiredActionOutputParent {
+  /** The object type, which is always 'submit_tool_outputs'. */
+  type: "submit_tool_outputs";
+  /** The details describing tools that should be called to submit tool outputs. */
+  submit_tool_outputs: SubmitToolOutputsDetailsOutput;
 }

-/**
- * A structured representation of a stop reason that signifies a token limit was reached before the model could naturally
- * complete.
- */
-export interface MaxTokensFinishDetailsOutput extends ChatFinishDetailsOutputParent {
-  /** The object type, which is always 'max_tokens' for this object. */
-  type: "max_tokens";
+/** The details describing tools that should be called to submit tool outputs. */
+export interface SubmitToolOutputsDetailsOutput {
+  /** The list of tool calls that must be resolved for the assistant thread run to continue. */
+  tool_calls: Array<RequiredToolCallOutput>;
 }

-/**
- * Represents the output results of Azure enhancements to chat completions, as configured via the matching input provided
- * in the request.
- */
-export interface AzureChatEnhancementsOutput {
-  /** The grounding enhancement that returns the bounding box of the objects detected in the image. */
-  grounding?: AzureGroundingEnhancementOutput;
+/** An abstract representation of a tool invocation needed by the model to continue a run. */
+export interface RequiredToolCallOutputParent {
+  /** The ID of the tool call. This ID must be referenced when submitting tool outputs. */
+  id: string;
+  type: string;
 }

-/** The grounding enhancement that returns the bounding box of the objects detected in the image. */
-export interface AzureGroundingEnhancementOutput {
-  /** The lines of text detected by the grounding enhancement. */
-  lines: Array<AzureGroundingEnhancementLineOutput>;
+/** A representation of a requested call to a function tool, needed by the model to continue evaluation of a run. */
+export interface RequiredFunctionToolCallOutput
+  extends RequiredToolCallOutputParent {
+  /** The object type of the required tool call. Always 'function' for function tools. */
+  type: "function";
+  /** Detailed information about the function to be executed by the tool that includes name and arguments. */
+  function: RequiredFunctionToolCallDetailsOutput;
 }
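When a run pauses with a `submit_tool_outputs` required action, each required call carries a function name plus a JSON-encoded argument string that should be validated and parsed before dispatch. A sketch, assuming a caller-supplied dispatch table; the `{ tool_call_id, output }` pairing mirrors the service's tool-output submission shape rather than anything declared in this file:

import type {
  SubmitToolOutputsActionOutput,
  RequiredFunctionToolCallOutput,
} from "./outputModels.js";

interface ToolOutput {
  tool_call_id: string;
  output: string;
}

// Resolves every required function call into an output the run can consume.
function resolveRequiredCalls(
  action: SubmitToolOutputsActionOutput,
  dispatch: Record<string, (args: unknown) => string>,
): ToolOutput[] {
  return action.submit_tool_outputs.tool_calls.map((call) => {
    const { function: fn } = call as RequiredFunctionToolCallOutput;
    // The model emits arguments as a JSON document; parse defensively.
    const args: unknown = JSON.parse(fn.arguments);
    return { tool_call_id: call.id, output: dispatch[fn.name](args) };
  });
}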
-/** A content line object consisting of an adjacent sequence of content elements, such as words and selection marks. */
-export interface AzureGroundingEnhancementLineOutput {
-  /** The text within the line. */
-  text: string;
-  /** An array of spans that represent detected objects and its bounding box information. */
-  spans: Array<AzureGroundingEnhancementLineSpanOutput>;
+/** The detailed information for a function invocation, as provided by a required action invoking a function tool, that includes the name of and arguments to the function. */
+export interface RequiredFunctionToolCallDetailsOutput {
+  /** The name of the function. */
+  name: string;
+  /** The arguments to use when invoking the named function, as provided by the model. Arguments are presented as a JSON document that should be validated and parsed for evaluation. */
+  arguments: string;
 }

-/** A span object that represents a detected object and its bounding box information. */
-export interface AzureGroundingEnhancementLineSpanOutput {
-  /** The text content of the span that represents the detected object. */
-  text: string;
-  /**
-   * The character offset within the text where the span begins. This offset is defined as the position of the first
-   * character of the span, counting from the start of the text as Unicode codepoints.
-   */
-  offset: number;
-  /** The length of the span in characters, measured in Unicode codepoints. */
-  length: number;
-  /** An array of objects representing points in the polygon that encloses the detected object. */
-  polygon: Array<AzureGroundingEnhancementCoordinatePointOutput>;
+/** The details of an error as encountered by an assistant thread run. */
+export interface RunErrorOutput {
+  /** The status for the error. */
+  code: string;
+  /** The human-readable text associated with the error. */
+  message: string;
 }

-/** A representation of a single polygon point as used by the Azure grounding enhancement. */
-export interface AzureGroundingEnhancementCoordinatePointOutput {
-  /** The x-coordinate (horizontal axis) of the point. */
-  x: number;
-  /** The y-coordinate (vertical axis) of the point. */
-  y: number;
+/** Usage statistics related to the run. This value will be `null` if the run is not yet in a terminal state (e.g. while it is `in_progress` or `queued`). */
+export interface RunCompletionUsageOutput {
+  /** Number of completion tokens used over the course of the run. */
+  completion_tokens: number;
+  /** Number of prompt tokens used over the course of the run. */
+  prompt_tokens: number;
+  /** Total number of tokens used (prompt + completion). */
+  total_tokens: number;
 }

-/** Represents the request data used to generate images. */
-export interface ImageGenerationOptionsOutput {
-  /**
-   * The model name or Azure OpenAI model deployment name to use for image generation. If not specified, dall-e-2 will be
-   * inferred as a default.
-   */
-  model?: string;
-  /** A description of the desired images. */
-  prompt: string;
-  /**
-   * The number of images to generate.
-   * Dall-e-2 models support values between 1 and 10.
-   * Dall-e-3 models only support a value of 1.
-   */
-  n?: number;
-  /**
-   * The desired dimensions for generated images.
-   * Dall-e-2 models support 256x256, 512x512, or 1024x1024.
-   * Dall-e-3 models support 1024x1024, 1792x1024, or 1024x1792.
-   */
-  size?: ImageSizeOutput;
-  /** The format in which image generation response items should be presented. */
-  response_format?: ImageGenerationResponseFormatOutput;
-  /**
-   * The desired image generation quality level to use.
-   * Only configurable with dall-e-3 models.
-   */
-  quality?: ImageGenerationQualityOutput;
-  /**
-   * The desired image generation style to use.
-   * Only configurable with dall-e-3 models.
-   */
-  style?: ImageGenerationStyleOutput;
-  /** A unique identifier representing your end-user, which can help to monitor and detect abuse. */
-  user?: string;
+/** The response data for a requested list of items. */
+export interface OpenAIPageableListOfThreadRunOutput {
+  /** The object type, which is always list. */
+  object: "list";
+  /** The requested list of items. */
+  data: Array<ThreadRunOutput>;
+  /** The first ID represented in this list. */
+  first_id: string;
+  /** The last ID represented in this list. */
+  last_id: string;
+  /** A value indicating whether there are additional values available not captured in this list. */
+  has_more: boolean;
 }

-/** The result of a successful image generation operation. */
-export interface ImageGenerationsOutput {
-  /**
-   * A timestamp representing when this operation was started.
-   * Expressed in seconds since the Unix epoch of 1970-01-01T00:00:00+0000.
-   */
-  created: number;
-  /** The images generated by the operation. */
-  data: Array<ImageGenerationDataOutput>;
+/** Detailed information about a single step of an assistant thread run. */
+export interface RunStepOutput {
+  /** The identifier, which can be referenced in API endpoints. */
+  id: string;
+  /** The object type, which is always 'thread.run.step'. */
+  object: "thread.run.step";
+  /** The type of run step, which can be either message_creation or tool_calls. */
+  type: RunStepTypeOutput;
+  /** The ID of the assistant associated with the run step. */
+  assistant_id: string;
+  /** The ID of the thread that was run. */
+  thread_id: string;
+  /** The ID of the run that this run step is a part of. */
+  run_id: string;
+  /** The status of this run step. */
+  status: RunStepStatusOutput;
+  /** The details for this run step. */
+  step_details: RunStepDetailsOutput;
+  /** If applicable, information about the last error encountered by this run step. */
+  last_error: RunStepErrorOutput | null;
+  /** The Unix timestamp, in seconds, representing when this object was created. */
+  created_at: number;
+  /** The Unix timestamp, in seconds, representing when this item expired. */
+  expired_at: number | null;
+  /** The Unix timestamp, in seconds, representing when this completed. */
+  completed_at: number | null;
+  /** The Unix timestamp, in seconds, representing when this was cancelled. */
+  cancelled_at: number | null;
+  /** The Unix timestamp, in seconds, representing when this failed. */
+  failed_at: number | null;
+  /** Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. */
+  usage?: RunStepCompletionUsageOutput | null;
+  /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */
+  metadata: Record<string, string> | null;
+}
+
+/** An abstract representation of the details for a run step. */
+export interface RunStepDetailsOutputParent {
+  type: RunStepTypeOutput;
+}
+
+/** The detailed information associated with a message creation run step. */
+export interface RunStepMessageCreationDetailsOutput
+  extends RunStepDetailsOutputParent {
+  /** The object type, which is always 'message_creation'. */
+  type: "message_creation";
+  /** Information about the message creation associated with this run step. */
+  message_creation: RunStepMessageCreationReferenceOutput;
+}
+
+/** The details of a message created as a part of a run step. */
+export interface RunStepMessageCreationReferenceOutput {
+  /** The ID of the message created by this run step. */
+  message_id: string;
+}
+
+/** The detailed information associated with a run step calling tools. */
+export interface RunStepToolCallDetailsOutput
+  extends RunStepDetailsOutputParent {
+  /** The object type, which is always 'tool_calls'. */
+  type: "tool_calls";
+  /** A list of tool call details for this run step. */
+  tool_calls: Array<RunStepToolCallOutput>;
+}
+
+/** An abstract representation of a detailed tool call as recorded within a run step for an existing run. */
+export interface RunStepToolCallOutputParent {
+  /** The ID of the tool call. This ID must be referenced when you submit tool outputs. */
+  id: string;
+  type: string;
+}

 /**
- * A representation of a single generated image, provided as either base64-encoded data or as a URL from which the image
- * may be retrieved.
+ * A record of a call to a code interpreter tool, issued by the model in evaluation of a defined tool, that
+ * represents inputs and outputs consumed and emitted by the code interpreter.
  */
-export interface ImageGenerationDataOutput {
-  /** The URL that provides temporary access to download the generated image. */
-  url?: string;
-  /** The complete data for an image, represented as a base64-encoded string. */
-  b64_json?: string;
-  /** Information about the content filtering results. */
-  content_filter_results?: ImageGenerationContentFilterResultsOutput;
-  /**
-   * The final prompt used by the model to generate the image.
-   * Only provided with dall-3-models and only when revisions were made to the prompt.
-   */
-  revised_prompt?: string;
-  /**
-   * Information about the content filtering category (hate, sexual, violence, self_harm), if
-   * it has been detected, as well as the severity level (very_low, low, medium, high-scale
-   * that determines the intensity and risk level of harmful content) and if it has been
-   * filtered or not. Information about jailbreak content and profanity, if it has been detected,
-   * and if it has been filtered or not. And information about customer block list, if it has
-   * been filtered and its id.
-   */
-  prompt_filter_results?: ImageGenerationPromptFilterResultsOutput;
+export interface RunStepCodeInterpreterToolCallOutput
+  extends RunStepToolCallOutputParent {
+  /** The object type, which is always 'code_interpreter'. */
+  type: "code_interpreter";
+  /** The details of the tool call to the code interpreter tool. */
+  code_interpreter: RunStepCodeInterpreterToolCallDetailsOutput;
 }

-/** Describes the content filtering result for the image generation request. */
-export interface ImageGenerationContentFilterResultsOutput {
-  /**
-   * Describes language related to anatomical organs and genitals, romantic relationships,
-   * acts portrayed in erotic or affectionate terms, physical sexual acts, including
-   * those portrayed as an assault or a forced sexual violent act against one’s will,
-   * prostitution, pornography, and abuse.
-   */
-  sexual?: ContentFilterResultOutput;
-  /**
-   * Describes language related to physical actions intended to hurt, injure, damage, or
-   * kill someone or something; describes weapons, etc.
-   */
-  violence?: ContentFilterResultOutput;
-  /**
-   * Describes language attacks or uses that include pejorative or discriminatory language
-   * with reference to a person or identity group on the basis of certain differentiating
-   * attributes of these groups including but not limited to race, ethnicity, nationality,
-   * gender identity and expression, sexual orientation, religion, immigration status, ability
-   * status, personal appearance, and body size.
-   */
-  hate?: ContentFilterResultOutput;
-  /**
-   * Describes language related to physical actions intended to purposely hurt, injure,
-   * or damage one’s body, or kill oneself.
-   */
-  self_harm?: ContentFilterResultOutput;
+/** The detailed information about a code interpreter invocation by the model. */
+export interface RunStepCodeInterpreterToolCallDetailsOutput {
+  /** The input provided by the model to the code interpreter tool. */
+  input: string;
+  /** The outputs produced by the code interpreter tool back to the model in response to the tool call. */
+  outputs: Array<RunStepCodeInterpreterToolCallOutputOutput>;
 }

-/** Describes the content filtering results for the prompt of a image generation request. */
-export interface ImageGenerationPromptFilterResultsOutput {
-  /**
-   * Describes language related to anatomical organs and genitals, romantic relationships,
-   * acts portrayed in erotic or affectionate terms, physical sexual acts, including
-   * those portrayed as an assault or a forced sexual violent act against one’s will,
-   * prostitution, pornography, and abuse.
-   */
-  sexual?: ContentFilterResultOutput;
-  /**
-   * Describes language related to physical actions intended to hurt, injure, damage, or
-   * kill someone or something; describes weapons, etc.
-   */
-  violence?: ContentFilterResultOutput;
-  /**
-   * Describes language attacks or uses that include pejorative or discriminatory language
-   * with reference to a person or identity group on the basis of certain differentiating
-   * attributes of these groups including but not limited to race, ethnicity, nationality,
-   * gender identity and expression, sexual orientation, religion, immigration status, ability
-   * status, personal appearance, and body size.
-   */
-  hate?: ContentFilterResultOutput;
-  /**
-   * Describes language related to physical actions intended to purposely hurt, injure,
-   * or damage one’s body, or kill oneself.
-   */
-  self_harm?: ContentFilterResultOutput;
-  /** Describes whether profanity was detected. */
-  profanity?: ContentFilterDetectionResultOutput;
-  /** Whether a jailbreak attempt was detected in the prompt. */
-  jailbreak?: ContentFilterDetectionResultOutput;
+/** An abstract representation of an emitted output from a code interpreter tool. */
+export interface RunStepCodeInterpreterToolCallOutputOutputParent {
+  type: string;
+}
+
+/** A representation of a log output emitted by a code interpreter tool in response to a tool call by the model. */
+export interface RunStepCodeInterpreterLogOutputOutput
+  extends RunStepCodeInterpreterToolCallOutputOutputParent {
+  /** The object type, which is always 'logs'. */
+  type: "logs";
+  /** The serialized log output emitted by the code interpreter. */
+  logs: string;
+}
+
+/** A representation of an image output emitted by a code interpreter tool in response to a tool call by the model. */
+export interface RunStepCodeInterpreterImageOutputOutput
+  extends RunStepCodeInterpreterToolCallOutputOutputParent {
+  /** The object type, which is always 'image'. */
+  type: "image";
+  /** Referential information for the image associated with this output. */
+  image: RunStepCodeInterpreterImageReferenceOutput;
+}
+
+/** An image reference emitted by a code interpreter tool in response to a tool call by the model. */
+export interface RunStepCodeInterpreterImageReferenceOutput {
+  /** The ID of the file associated with this image. */
+  file_id: string;
+}
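Run-step tool calls and code interpreter outputs are likewise `type`-discriminated, so separating interpreter logs from generated images is a switch on the discriminator. A sketch over the models above, again with explicit casts because the unions include the open-ended parents:

import type {
  RunStepToolCallDetailsOutput,
  RunStepCodeInterpreterToolCallOutput,
  RunStepCodeInterpreterLogOutputOutput,
  RunStepCodeInterpreterImageOutputOutput,
} from "./outputModels.js";

// Splits a tool_calls run step into interpreter logs and image file IDs.
function summarizeCodeInterpreter(details: RunStepToolCallDetailsOutput): {
  logs: string[];
  imageFileIds: string[];
} {
  const logs: string[] = [];
  const imageFileIds: string[] = [];
  for (const call of details.tool_calls) {
    if (call.type !== "code_interpreter") {
      continue; // file_search and function calls are handled elsewhere
    }
    const ci = (call as RunStepCodeInterpreterToolCallOutput).code_interpreter;
    for (const output of ci.outputs) {
      if (output.type === "logs") {
        logs.push((output as RunStepCodeInterpreterLogOutputOutput).logs);
      } else if (output.type === "image") {
        const image = (output as RunStepCodeInterpreterImageOutputOutput).image;
        imageFileIds.push(image.file_id);
      }
    }
  }
  return { logs, imageFileIds };
}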
- * Embeddings measure the relatedness of text strings and are commonly used for search, clustering,
- * recommendations, and other similar scenarios.
+ * A record of a call to a file search tool, issued by the model in evaluation of a defined tool, that represents
+ * an executed file search.
  */
-export interface EmbeddingsOutput {
-  /** Embedding values for the prompts submitted in the request. */
-  data: Array<EmbeddingItemOutput>;
-  /** Usage counts for tokens input using the embeddings API. */
-  usage: EmbeddingsUsageOutput;
+export interface RunStepFileSearchToolCallOutput
+  extends RunStepToolCallOutputParent {
+  /** The object type, which is always 'file_search'. */
+  type: "file_search";
+  /** Reserved for future use. */
+  file_search: Record<string, unknown>;
 }

-/** Representation of a single embeddings relatedness comparison. */
-export interface EmbeddingItemOutput {
-  /**
-   * List of embeddings value for the input prompt. These represent a measurement of the
-   * vector-based relatedness of the provided input.
-   */
-  embedding: number[];
-  /** Index of the prompt to which the EmbeddingItem corresponds. */
-  index: number;
+/**
+ * A record of a call to a function tool, issued by the model in evaluation of a defined tool, that represents the inputs
+ * and output consumed and emitted by the specified function.
+ */
+export interface RunStepFunctionToolCallOutput
+  extends RunStepToolCallOutputParent {
+  /** The object type, which is always 'function'. */
+  type: "function";
+  /** The detailed information about the function called by the model. */
+  function: RunStepFunctionToolCallDetailsOutput;
+}
+
+/** The detailed information about the function called by the model. */
+export interface RunStepFunctionToolCallDetailsOutput {
+  /** The name of the function. */
+  name: string;
+  /** The arguments that the model requires to be provided to the named function. */
+  arguments: string;
+  /** The output of the function, only populated for function calls that have already had their outputs submitted. */
+  output: string | null;
+}
+
+/** The error information associated with a failed run step. */
+export interface RunStepErrorOutput {
+  /** The error code for this error. */
+  code: RunStepErrorCodeOutput;
+  /** The human-readable text associated with this error. */
+  message: string;
 }

-/** Measurement of the amount of tokens used in this request and response. */
-export interface EmbeddingsUsageOutput {
-  /** Number of tokens sent in the original request. */
+/** Usage statistics related to the run step. */
+export interface RunStepCompletionUsageOutput {
+  /** Number of completion tokens used over the course of the run step. */
+  completion_tokens: number;
+  /** Number of prompt tokens used over the course of the run step. */
   prompt_tokens: number;
-  /** Total number of tokens transacted in this request/response. */
+  /** Total number of tokens used (prompt + completion). */
   total_tokens: number;
 }

-/**
- * An abstract representation of a tool call that must be resolved in a subsequent request to perform the requested
- * chat completion.
- */
-export type ChatCompletionsToolCallOutput =
-  | ChatCompletionsToolCallOutputParent
-  | ChatCompletionsFunctionToolCallOutput;
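Worth noting for consumers of `RunStepFunctionToolCallDetailsOutput`: `arguments` arrives as a JSON-encoded string rather than a parsed object, and `output` stays `null` until the tool outputs are submitted. A minimal sketch of consuming it; the `parseFunctionToolCall` helper is illustrative, not part of the SDK:

import type { RunStepFunctionToolCallDetailsOutput } from "./outputModels.js";

// Decode the JSON-encoded `arguments` string and surface the (possibly
// still-null) function output alongside the function name.
function parseFunctionToolCall(details: RunStepFunctionToolCallDetailsOutput): {
  name: string;
  args: unknown;
  output: string | null;
} {
  return {
    name: details.name,
    args: JSON.parse(details.arguments),
    output: details.output,
  };
}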
-/** An abstract representation of structured information about why a chat completions response terminated. */
-export type ChatFinishDetailsOutput =
-  | ChatFinishDetailsOutputParent
-  | StopFinishDetailsOutput
-  | MaxTokensFinishDetailsOutput;
-/** Alias for AudioTaskLabelOutput */
-export type AudioTaskLabelOutput = string | "transcribe" | "translate";
-/** Alias for ContentFilterSeverityOutput */
-export type ContentFilterSeverityOutput = string | "safe" | "low" | "medium" | "high";
-/** Alias for CompletionsFinishReasonOutput */
-export type CompletionsFinishReasonOutput =
-  | string
-  | "stop"
-  | "length"
-  | "content_filter"
-  | "function_call"
-  | "tool_calls";
-/** Alias for ChatRoleOutput */
-export type ChatRoleOutput = string | "system" | "assistant" | "user" | "function" | "tool";
-/** Alias for ImageSizeOutput */
-export type ImageSizeOutput =
+/** The response data for a requested list of items. */
+export interface OpenAIPageableListOfOutput<T = unknown> {
+  /** The object type, which is always 'list'. */
+  object: "list";
+  /** The requested list of items. */
+  data: Array<T>;
+  /** The first ID represented in this list. */
+  first_id: string;
+  /** The last ID represented in this list. */
+  last_id: string;
+  /** A value indicating whether there are additional values available not captured in this list. */
+  has_more: boolean;
+}
+
+/** The response data from a file list operation. */
+export interface FileListResponseOutput {
+  /** The object type, which is always 'list'. */
+  object: "list";
+  /** The files returned for the request. */
+  data: Array<OpenAIFileOutput>;
+}
+
+/** Represents a file uploaded to the service. */
+export interface OpenAIFileOutput {
+  /** The object type, which is always 'file'. */
+  object: "file";
+  /** The identifier, which can be referenced in API endpoints. */
+  id: string;
+  /** The size of the file, in bytes. */
+  bytes: number;
+  /** The name of the file. */
+  filename: string;
+  /** The Unix timestamp, in seconds, representing when this object was created. */
+  created_at: number;
+  /** The intended purpose of a file. */
+  purpose: FilePurposeOutput;
+  /** The state of the file. This field is available in Azure OpenAI only. */
+  status?: FileStateOutput;
+  /** The error message with details in case processing of this file failed. This field is available in Azure OpenAI only. */
+  status_details?: string;
+}
+
+/** A status response from a file deletion operation. */
+export interface FileDeletionStatusOutput {
+  /** The ID of the resource specified for deletion. */
+  id: string;
+  /** A value indicating whether deletion was successful. */
+  deleted: boolean;
+  /** The object type, which is always 'file'. */
+  object: "file";
+}
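The pageable list shape above is cursor-based: `last_id` feeds the `after` query parameter of the next request, and `has_more` signals when to stop. A sketch of draining such a list, assuming the list type is generic over its element as declared above (the element type parameters appear to have been lost in extraction, and the repeated identical declarations of this interface are collapsed into the single one here); `fetchPage` stands in for whatever operation produces a page:

import type {
  AssistantOutput,
  OpenAIPageableListOfOutput,
} from "./outputModels.js";

// Walk a cursor-paginated list by threading `last_id` back in as the `after`
// cursor until the service reports `has_more: false`.
async function collectAll(
  fetchPage: (
    after?: string,
  ) => Promise<OpenAIPageableListOfOutput<AssistantOutput>>,
): Promise<AssistantOutput[]> {
  const items: AssistantOutput[] = [];
  let after: string | undefined;
  for (;;) {
    const page = await fetchPage(after);
    items.push(...page.data);
    if (!page.has_more) return items;
    after = page.last_id;
  }
}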
+/** A vector store is a collection of processed files that can be used by the `file_search` tool. */
+export interface VectorStoreOutput {
+  /** The identifier, which can be referenced in API endpoints. */
+  id: string;
+  /** The object type, which is always `vector_store`. */
+  object: "vector_store";
+  /** The Unix timestamp (in seconds) for when the vector store was created. */
+  created_at: number;
+  /** The name of the vector store. */
+  name: string;
+  /** The total number of bytes used by the files in the vector store. */
+  usage_bytes: number;
+  /** Counts of files processed or being processed by this vector store, grouped by status. */
+  file_counts: VectorStoreFileCountOutput;
+  /** The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use. */
+  status: VectorStoreStatusOutput;
+  /** Details on when this vector store expires. */
+  expires_after?: VectorStoreExpirationPolicyOutput;
+  /** The Unix timestamp (in seconds) for when the vector store will expire. */
+  expires_at?: number | null;
+  /** The Unix timestamp (in seconds) for when the vector store was last active. */
+  last_active_at: number | null;
+  /** A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. */
+  metadata: Record<string, string> | null;
+}
+
+/** Counts of files processed or being processed by this vector store, grouped by status. */
+export interface VectorStoreFileCountOutput {
+  /** The number of files that are currently being processed. */
+  in_progress: number;
+  /** The number of files that have been successfully processed. */
+  completed: number;
+  /** The number of files that have failed to process. */
+  failed: number;
+  /** The number of files that were cancelled. */
+  cancelled: number;
+  /** The total number of files. */
+  total: number;
+}
+
+/** The expiration policy for a vector store. */
+export interface VectorStoreExpirationPolicyOutput {
+  /** Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`. */
+  anchor: VectorStoreExpirationPolicyAnchorOutput;
+  /** The number of days after the anchor time that the vector store will expire. */
+  days: number;
+}
+
+/** Response object for deleting a vector store. */
+export interface VectorStoreDeletionStatusOutput {
+  /** The ID of the resource specified for deletion. */
+  id: string;
+  /** A value indicating whether deletion was successful. */
+  deleted: boolean;
+  /** The object type, which is always 'vector_store.deleted'. */
+  object: "vector_store.deleted";
+}
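Reading the expiration fields together: when only a policy is present, the projected expiry is the anchor timestamp plus `days` worth of seconds; when the service has already computed `expires_at`, that value takes precedence. A hedged sketch under that reading; `projectedExpiry` is illustrative, not part of the SDK:

import type { VectorStoreOutput } from "./outputModels.js";

// Prefer the service-computed `expires_at`; otherwise derive it from the
// `last_active_at` anchor plus the policy's `days` (in seconds).
function projectedExpiry(store: VectorStoreOutput): number | undefined {
  if (store.expires_at != null) return store.expires_at;
  if (store.expires_after && store.last_active_at != null) {
    return store.last_active_at + store.expires_after.days * 24 * 60 * 60;
  }
  return undefined; // no expiration policy in effect
}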
+/** Description of a file attached to a vector store. */
+export interface VectorStoreFileOutput {
+  /** The identifier, which can be referenced in API endpoints. */
+  id: string;
+  /** The object type, which is always `vector_store.file`. */
+  object: "vector_store.file";
+  /**
+   * The total vector store usage in bytes. Note that this may be different from the original file
+   * size.
+   */
+  usage_bytes: number;
+  /** The Unix timestamp (in seconds) for when the vector store file was created. */
+  created_at: number;
+  /** The ID of the vector store that the file is attached to. */
+  vector_store_id: string;
+  /** The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use. */
+  status: VectorStoreFileStatusOutput;
+  /** The last error associated with this vector store file. Will be `null` if there are no errors. */
+  last_error: VectorStoreFileErrorOutput | null;
+}
+
+/** Details on the error that may have occurred while processing a file for this vector store. */
+export interface VectorStoreFileErrorOutput {
+  /** One of `server_error` or `rate_limit_exceeded`. */
+  code: VectorStoreFileErrorCodeOutput;
+  /** A human-readable description of the error. */
+  message: string;
+}
+
+/** Response object for deleting a vector store file relationship. */
+export interface VectorStoreFileDeletionStatusOutput {
+  /** The ID of the resource specified for deletion. */
+  id: string;
+  /** A value indicating whether deletion was successful. */
+  deleted: boolean;
+  /** The object type, which is always 'vector_store.file.deleted'. */
+  object: "vector_store.file.deleted";
+}
+
+/** A batch of files attached to a vector store. */
+export interface VectorStoreFileBatchOutput {
+  /** The identifier, which can be referenced in API endpoints. */
+  id: string;
+  /** The object type, which is always `vector_store.files_batch`. */
+  object: "vector_store.files_batch";
+  /** The Unix timestamp (in seconds) for when the vector store files batch was created. */
+  created_at: number;
+  /** The ID of the vector store that the file is attached to. */
+  vector_store_id: string;
+  /** The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`. */
+  status: VectorStoreFileBatchStatusOutput;
+  /** Counts of files processed or being processed by this vector store, grouped by status. */
+  file_counts: VectorStoreFileCountOutput;
+}
+
+/** An abstract representation of an input tool definition that an assistant can use. */
+export type ToolDefinitionOutput =
+  | ToolDefinitionOutputParent
+  | CodeInterpreterToolDefinitionOutput
+  | FileSearchToolDefinitionOutput
+  | FunctionToolDefinitionOutput;
+/** An abstract representation of a single item of thread message content. */
+export type MessageContentOutput =
+  | MessageContentOutputParent
+  | MessageTextContentOutput
+  | MessageImageFileContentOutput;
+/** An abstract representation of an annotation to text thread message content. */
+export type MessageTextAnnotationOutput =
+  | MessageTextAnnotationOutputParent
+  | MessageTextFileCitationAnnotationOutput
+  | MessageTextFilePathAnnotationOutput;
+/** An abstract representation of a required action for an assistant thread run to continue. */
+export type RequiredActionOutput =
+  | RequiredActionOutputParent
+  | SubmitToolOutputsActionOutput;
+/** An abstract representation of a tool invocation needed by the model to continue a run. */
+export type RequiredToolCallOutput =
+  | RequiredToolCallOutputParent
+  | RequiredFunctionToolCallOutput;
+/** An abstract representation of the details for a run step. */
+export type RunStepDetailsOutput =
+  | RunStepDetailsOutputParent
+  | RunStepMessageCreationDetailsOutput
+  | RunStepToolCallDetailsOutput;
+/** An abstract representation of a detailed tool call as recorded within a run step for an existing run. */
+export type RunStepToolCallOutput =
+  | RunStepToolCallOutputParent
+  | RunStepCodeInterpreterToolCallOutput
+  | RunStepFileSearchToolCallOutput
+  | RunStepFunctionToolCallOutput;
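These `*Output` unions all discriminate on a `type` field, but the parent variant keeps `type: string`, so the literal arms do not narrow the union by themselves; the deserializers later in this diff use explicit casts for the same reason. A small sketch of that pattern; `describeToolCall` is illustrative, not part of the SDK:

import type {
  RunStepCodeInterpreterToolCallOutput,
  RunStepFunctionToolCallOutput,
  RunStepToolCallOutput,
} from "./outputModels.js";

// Dispatch on the `type` discriminator; casts mirror the style of the
// deserialize helpers, and the default arm tolerates unknown tool call types.
function describeToolCall(call: RunStepToolCallOutput): string {
  switch (call.type) {
    case "code_interpreter":
      return `code interpreter input: ${(call as RunStepCodeInterpreterToolCallOutput).code_interpreter.input}`;
    case "function":
      return `function ${(call as RunStepFunctionToolCallOutput).function.name}`;
    case "file_search":
      return `file search call ${call.id}`;
    default:
      return `unrecognized tool call type '${call.type}'`;
  }
}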
+/** An abstract representation of an emitted output from a code interpreter tool. */
+export type RunStepCodeInterpreterToolCallOutputOutput =
+  | RunStepCodeInterpreterToolCallOutputOutputParent
+  | RunStepCodeInterpreterLogOutputOutput
+  | RunStepCodeInterpreterImageOutputOutput;
+/** Alias for AssistantsApiResponseFormatModeOutput */
+export type AssistantsApiResponseFormatModeOutput = string;
+/** Alias for ApiResponseFormatOutput */
+export type ApiResponseFormatOutput = string;
+/** Alias for AssistantsApiResponseFormatOptionOutput */
+export type AssistantsApiResponseFormatOptionOutput =
   | string
-  | "256x256"
-  | "512x512"
-  | "1024x1024"
-  | "1792x1024"
-  | "1024x1792";
-/** Alias for ImageGenerationResponseFormatOutput */
-export type ImageGenerationResponseFormatOutput = string | "url" | "b64_json";
-/** Alias for ImageGenerationQualityOutput */
-export type ImageGenerationQualityOutput = string | "standard" | "hd";
-/** Alias for ImageGenerationStyleOutput */
-export type ImageGenerationStyleOutput = string | "natural" | "vivid";
-/** Alias for AzureOpenAIOperationStateOutput */
-export type AzureOpenAIOperationStateOutput =
+  | AssistantsApiResponseFormatModeOutput
+  | AssistantsApiResponseFormatOutput;
+/** Alias for MessageRoleOutput */
+export type MessageRoleOutput = string;
+/** Alias for MessageAttachmentToolDefinitionOutput */
+export type MessageAttachmentToolDefinitionOutput =
+  | CodeInterpreterToolDefinitionOutput
+  | FileSearchToolDefinitionOutput;
+/** Alias for MessageStatusOutput */
+export type MessageStatusOutput = string;
+/** Alias for MessageIncompleteDetailsReasonOutput */
+export type MessageIncompleteDetailsReasonOutput = string;
+/** Alias for TruncationStrategyOutput */
+export type TruncationStrategyOutput = string;
+/** Alias for AssistantsApiToolChoiceOptionModeOutput */
+export type AssistantsApiToolChoiceOptionModeOutput = string;
+/** Alias for AssistantsNamedToolChoiceTypeOutput */
+export type AssistantsNamedToolChoiceTypeOutput = string;
+/** Alias for AssistantsApiToolChoiceOptionOutput */
+export type AssistantsApiToolChoiceOptionOutput =
   | string
-  | "notRunning"
-  | "running"
-  | "succeeded"
-  | "canceled"
-  | "failed";
+  | AssistantsApiToolChoiceOptionModeOutput
+  | AssistantsNamedToolChoiceOutput;
+/** Alias for RunStatusOutput */
+export type RunStatusOutput = string;
+/** Alias for IncompleteRunDetailsOutput */
+export type IncompleteRunDetailsOutput = string;
+/** Alias for RunStepTypeOutput */
+export type RunStepTypeOutput = string;
+/** Alias for RunStepStatusOutput */
+export type RunStepStatusOutput = string;
+/** Alias for RunStepErrorCodeOutput */
+export type RunStepErrorCodeOutput = string;
+/** Alias for FilePurposeOutput */
+export type FilePurposeOutput = string;
+/** Alias for FileStateOutput */
+export type FileStateOutput = string;
+/** Alias for VectorStoreStatusOutput */
+export type VectorStoreStatusOutput = string;
+/** Alias for VectorStoreExpirationPolicyAnchorOutput */
+export type VectorStoreExpirationPolicyAnchorOutput = string;
+/** Alias for VectorStoreFileStatusOutput */
+export type VectorStoreFileStatusOutput = string;
+/** Alias for VectorStoreFileErrorCodeOutput */
+export type VectorStoreFileErrorCodeOutput = string;
+/** Alias for VectorStoreFileBatchStatusOutput */
+export type VectorStoreFileBatchStatusOutput = string;
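Unlike the removed aliases, which enumerated known literals alongside `string`, the new aliases are plain `string`, so known values are matched by comparison and unrecognized values from newer service versions still type-check. A sketch of working with one of them, assuming `completed`, `failed`, `cancelled`, and `expired` as the terminal run statuses (the list is illustrative, not confirmed by this diff):

import type { RunStatusOutput } from "./outputModels.js";

// Assumed terminal statuses; an unknown status simply reports as non-terminal.
const TERMINAL_RUN_STATUSES = [
  "completed",
  "failed",
  "cancelled",
  "expired",
] as const;

function isTerminal(status: RunStatusOutput): boolean {
  return (TERMINAL_RUN_STATUSES as readonly string[]).includes(status);
}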
diff --git a/sdk/openai/openai/src/rest/parameters.ts b/sdk/openai/openai/src/rest/parameters.ts
index 50e51ae1a98e..ceecd241cc54 100644
--- a/sdk/openai/openai/src/rest/parameters.ts
+++ b/sdk/openai/openai/src/rest/parameters.ts
@@ -3,90 +3,311 @@ import {
   RequestParameters } from "@azure-rest/core-client";
 import {
-  AudioTranscriptionOptions,
-  AudioTranslationOptions,
-  CompletionsOptions,
-  ChatCompletionsOptions,
-  ImageGenerationOptions,
-  EmbeddingsOptions,
+  AssistantCreationOptions,
+  ListSortOrder,
+  UpdateAssistantOptions,
+  AssistantThreadCreationOptions,
+  UpdateAssistantThreadOptions,
+  ThreadMessageOptions,
+  CreateRunOptions,
+  ToolOutput,
+  CreateAndRunThreadOptions,
+  FilePurpose,
+  VectorStoreOptions,
+  VectorStoreUpdateOptions,
+  VectorStoreFileStatusFilter,
 } from "./models.js";

-export interface GetAudioTranscriptionAsPlainTextBodyParam {
-  body?: AudioTranscriptionOptions;
+export interface CreateAssistantBodyParam {
+  /** The request details to use when creating a new assistant. */
+  body: AssistantCreationOptions;
 }

-export interface GetAudioTranscriptionAsPlainTextMediaTypesParam {
-  /** The content type for the operation. Always multipart/form-data for this operation. */
-  contentType: "multipart/form-data";
+export type CreateAssistantParameters = CreateAssistantBodyParam &
+  RequestParameters;
+
+export interface ListAssistantsQueryParamProperties {
+  /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */
+  limit?: number;
+  /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */
+  order?: ListSortOrder;
+  /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */
+  after?: string;
+  /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */
+  before?: string;
+}
+
+export interface ListAssistantsQueryParam {
+  queryParameters?: ListAssistantsQueryParamProperties;
 }

-export type GetAudioTranscriptionAsPlainTextParameters =
-  GetAudioTranscriptionAsPlainTextMediaTypesParam &
-    GetAudioTranscriptionAsPlainTextBodyParam &
-    RequestParameters;
+export type ListAssistantsParameters = ListAssistantsQueryParam &
+  RequestParameters;
+export type GetAssistantParameters = RequestParameters;

-export interface GetAudioTranscriptionAsResponseObjectBodyParam {
-  body?: AudioTranscriptionOptions;
+export interface UpdateAssistantBodyParam {
+  /** The request details to use when modifying an existing assistant. */
+  body: UpdateAssistantOptions;
 }

-export interface GetAudioTranscriptionAsResponseObjectMediaTypesParam {
-  /** The content type for the operation. Always multipart/form-data for this operation. */
-  contentType: "multipart/form-data";
+export type UpdateAssistantParameters = UpdateAssistantBodyParam &
+  RequestParameters;
+export type DeleteAssistantParameters = RequestParameters;
+
+export interface CreateThreadBodyParam {
+  /** The details used to create a new assistant thread. */
+  body: AssistantThreadCreationOptions;
 }

-export type GetAudioTranscriptionAsResponseObjectParameters =
-  GetAudioTranscriptionAsResponseObjectMediaTypesParam &
-    GetAudioTranscriptionAsResponseObjectBodyParam &
-    RequestParameters;
+export type CreateThreadParameters = CreateThreadBodyParam & RequestParameters;
+export type GetThreadParameters = RequestParameters;

-export interface GetAudioTranslationAsPlainTextBodyParam {
-  body?: AudioTranslationOptions;
+export interface UpdateThreadBodyParam {
+  /** The details used to update an existing assistant thread. */
+  body: UpdateAssistantThreadOptions;
 }

-export interface GetAudioTranslationAsPlainTextMediaTypesParam {
-  /** The content type for the operation. Always multipart/form-data for this operation. */
-  contentType: "multipart/form-data";
+export type UpdateThreadParameters = UpdateThreadBodyParam & RequestParameters;
+export type DeleteThreadParameters = RequestParameters;
+
+export interface CreateMessageBodyParam {
+  /** A single message within an assistant thread, as provided during that thread's creation for its initial state. */
+  body: ThreadMessageOptions;
+}
+
+export type CreateMessageParameters = CreateMessageBodyParam &
+  RequestParameters;
+
+export interface ListMessagesQueryParamProperties {
+  /** Filter messages by the run ID that generated them. */
+  runId?: string;
+  /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */
+  limit?: number;
+  /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */
+  order?: ListSortOrder;
+  /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */
+  after?: string;
+  /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */
+  before?: string;
+}
+
+export interface ListMessagesQueryParam {
+  queryParameters?: ListMessagesQueryParamProperties;
+}
+
+export type ListMessagesParameters = ListMessagesQueryParam & RequestParameters;
+export type GetMessageParameters = RequestParameters;
+
+export interface UpdateMessageBodyParam {
+  body?: { metadata?: Record<string, string> | null };
+}
+
+export type UpdateMessageParameters = UpdateMessageBodyParam &
+  RequestParameters;
+
+export interface CreateRunBodyParam {
+  /** The details for the run to create. */
+  body: CreateRunOptions;
+}
+
+export type CreateRunParameters = CreateRunBodyParam & RequestParameters;
+
+export interface ListRunsQueryParamProperties {
+  /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */
+  limit?: number;
+  /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */
+  order?: ListSortOrder;
+  /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */
+  after?: string;
+  /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */
+  before?: string;
+}
+
+export interface ListRunsQueryParam {
+  queryParameters?: ListRunsQueryParamProperties;
+}
+
+export type ListRunsParameters = ListRunsQueryParam & RequestParameters;
+export type GetRunParameters = RequestParameters;
+
+export interface UpdateRunBodyParam {
+  body?: { metadata?: Record<string, string> | null };
+}
+
+export type UpdateRunParameters = UpdateRunBodyParam & RequestParameters;
+
+export interface SubmitToolOutputsToRunBodyParam {
+  body?: { tool_outputs: Array<ToolOutput>; stream?: boolean | null };
+}
+
+export type SubmitToolOutputsToRunParameters = SubmitToolOutputsToRunBodyParam &
+  RequestParameters;
+export type CancelRunParameters = RequestParameters;
+
+export interface CreateThreadAndRunBodyParam {
+  /** The details used when creating and immediately running a new assistant thread. */
+  body: CreateAndRunThreadOptions;
 }

-export type GetAudioTranslationAsPlainTextParameters =
-  GetAudioTranslationAsPlainTextMediaTypesParam &
-    GetAudioTranslationAsPlainTextBodyParam &
-    RequestParameters;
+export type CreateThreadAndRunParameters = CreateThreadAndRunBodyParam &
+  RequestParameters;
+export type GetRunStepParameters = RequestParameters;

-export interface GetAudioTranslationAsResponseObjectBodyParam {
-  body?: AudioTranslationOptions;
+export interface ListRunStepsQueryParamProperties {
+  /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */
+  limit?: number;
+  /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */
+  order?: ListSortOrder;
+  /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */
+  after?: string;
+  /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */
+  before?: string;
 }

-export interface GetAudioTranslationAsResponseObjectMediaTypesParam {
-  /** The content type for the operation. Always multipart/form-data for this operation. */
+export interface ListRunStepsQueryParam {
+  queryParameters?: ListRunStepsQueryParamProperties;
+}
+
+export type ListRunStepsParameters = ListRunStepsQueryParam & RequestParameters;
+
+export interface ListFilesQueryParamProperties {
+  /** A value that, when provided, limits list results to files matching the corresponding purpose. */
+  purpose?: FilePurpose;
+}
+
+export interface ListFilesQueryParam {
+  queryParameters?: ListFilesQueryParamProperties;
+}
+
+export type ListFilesParameters = ListFilesQueryParam & RequestParameters;
+
+export interface UploadFileBodyParam {
+  body?:
+    | FormData
+    | Array<
+        | {
+            name: "file";
+            body:
+              | string
+              | Uint8Array
+              | ReadableStream
+              | NodeJS.ReadableStream
+              | File;
+            filename?: string;
+            contentType?: string;
+          }
+        | {
+            name: "purpose";
+            body: FilePurpose;
+            filename?: string;
+            contentType?: string;
+          }
+        | { name: "filename"; body: string }
+      >;
+}
+
+export interface UploadFileMediaTypesParam {
+  /** The 'content-type' header value, always 'multipart/form-data' for this operation. */
   contentType: "multipart/form-data";
 }

-export type GetAudioTranslationAsResponseObjectParameters =
-  GetAudioTranslationAsResponseObjectMediaTypesParam &
-    GetAudioTranslationAsResponseObjectBodyParam &
-    RequestParameters;
+export type UploadFileParameters = UploadFileMediaTypesParam &
+  UploadFileBodyParam &
+  RequestParameters;
+export type DeleteFileParameters = RequestParameters;
+export type GetFileParameters = RequestParameters;
+export type GetFileContentParameters = RequestParameters;
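The multipart upload body can be supplied either as a `FormData` instance or as an array of named parts matching the form fields above. A sketch of the array form; the byte content and the `assistants` purpose value are illustrative assumptions, not taken from this diff:

import type { UploadFileParameters } from "./parameters.js";

// Build the request options for an upload: one part per form field, with the
// overall content type pinned to multipart/form-data.
const uploadParams: UploadFileParameters = {
  contentType: "multipart/form-data",
  body: [
    {
      name: "file",
      body: new Uint8Array([0x25, 0x50, 0x44, 0x46]), // "%PDF", stand-in bytes
      filename: "notes.pdf",
    },
    { name: "purpose", body: "assistants" },
  ],
};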
-export interface GetCompletionsBodyParam {
-  body?: CompletionsOptions;
+export interface ListVectorStoresQueryParamProperties {
+  /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */
+  limit?: number;
+  /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */
+  order?: ListSortOrder;
+  /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */
+  after?: string;
+  /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */
+  before?: string;
 }

-export type GetCompletionsParameters = GetCompletionsBodyParam & RequestParameters;
+export interface ListVectorStoresQueryParam {
+  queryParameters?: ListVectorStoresQueryParamProperties;
+}

-export interface GetChatCompletionsBodyParam {
-  body?: ChatCompletionsOptions;
+export type ListVectorStoresParameters = ListVectorStoresQueryParam &
+  RequestParameters;
+
+export interface CreateVectorStoreBodyParam {
+  /** Request object for creating a vector store. */
+  body: VectorStoreOptions;
+}
+
+export type CreateVectorStoreParameters = CreateVectorStoreBodyParam &
+  RequestParameters;
+export type GetVectorStoreParameters = RequestParameters;
+
+export interface ModifyVectorStoreBodyParam {
+  /** Request object for updating a vector store. */
+  body: VectorStoreUpdateOptions;
+}
+
+export type ModifyVectorStoreParameters = ModifyVectorStoreBodyParam &
+  RequestParameters;
+export type DeleteVectorStoreParameters = RequestParameters;
+
+export interface ListVectorStoreFilesQueryParamProperties {
+  /** Filter by file status. */
+  filter?: VectorStoreFileStatusFilter;
+  /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */
+  limit?: number;
+  /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */
+  order?: ListSortOrder;
+  /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */
+  after?: string;
+  /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */
+  before?: string;
 }

-export type GetChatCompletionsParameters = GetChatCompletionsBodyParam & RequestParameters;
+export interface ListVectorStoreFilesQueryParam {
+  queryParameters?: ListVectorStoreFilesQueryParamProperties;
+}
+
+export type ListVectorStoreFilesParameters = ListVectorStoreFilesQueryParam &
+  RequestParameters;

-export interface GetImageGenerationsBodyParam {
-  body?: ImageGenerationOptions;
+export interface CreateVectorStoreFileBodyParam {
+  body?: { file_id: string };
 }

-export type GetImageGenerationsParameters = GetImageGenerationsBodyParam & RequestParameters;
+export type CreateVectorStoreFileParameters = CreateVectorStoreFileBodyParam &
+  RequestParameters;
+export type GetVectorStoreFileParameters = RequestParameters;
+export type DeleteVectorStoreFileParameters = RequestParameters;
+
+export interface CreateVectorStoreFileBatchBodyParam {
+  body?: { file_ids: string[] };
+}
+
+export type CreateVectorStoreFileBatchParameters =
+  CreateVectorStoreFileBatchBodyParam & RequestParameters;
+export type GetVectorStoreFileBatchParameters = RequestParameters;
+export type CancelVectorStoreFileBatchParameters = RequestParameters;
+
+export interface ListVectorStoreFileBatchFilesQueryParamProperties {
+  /** Filter by file status. */
+  filter?: VectorStoreFileStatusFilter;
+  /** A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. */
+  limit?: number;
+  /** Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. */
+  order?: ListSortOrder;
+  /** A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. */
+  after?: string;
+  /** A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. */
+  before?: string;
+}

-export interface GetEmbeddingsBodyParam {
-  body?: EmbeddingsOptions;
+export interface ListVectorStoreFileBatchFilesQueryParam {
+  queryParameters?: ListVectorStoreFileBatchFilesQueryParamProperties;
 }

-export type GetEmbeddingsParameters = GetEmbeddingsBodyParam & RequestParameters;
+export type ListVectorStoreFileBatchFilesParameters =
+  ListVectorStoreFileBatchFilesQueryParam & RequestParameters;
diff --git a/sdk/openai/openai/src/rest/responses.ts b/sdk/openai/openai/src/rest/responses.ts
index 18c37caa4448..18d47723d538 100644
--- a/sdk/openai/openai/src/rest/responses.ts
+++ b/sdk/openai/openai/src/rest/responses.ts
@@ -1,150 +1,262 @@
 // Copyright (c) Microsoft Corporation.
// Licensed under the MIT license. -import { RawHttpHeaders } from "@azure/core-rest-pipeline"; import { HttpResponse } from "@azure-rest/core-client"; import { - AudioTranscriptionOutput, - AudioTranslationOutput, - CompletionsOutput, - ChatCompletionsOutput, - ImageGenerationsOutput, - EmbeddingsOutput, + AssistantOutput, + OpenAIPageableListOfOutput, + AssistantDeletionStatusOutput, + AssistantThreadOutput, + ThreadDeletionStatusOutput, + ThreadMessageOutput, + ThreadRunOutput, + RunStepOutput, + FileListResponseOutput, + OpenAIFileOutput, + FileDeletionStatusOutput, + VectorStoreOutput, + VectorStoreDeletionStatusOutput, + VectorStoreFileOutput, + VectorStoreFileDeletionStatusOutput, + VectorStoreFileBatchOutput, } from "./outputModels.js"; -import { ErrorResponse } from "./models.js"; -/** The request has succeeded. */ -export interface GetAudioTranscriptionAsPlainText200Response extends HttpResponse { +/** The new assistant instance. */ +export interface CreateAssistant200Response extends HttpResponse { status: "200"; - body: string; + body: AssistantOutput; } -export interface GetAudioTranscriptionAsPlainTextDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The requested list of assistants. */ +export interface ListAssistants200Response extends HttpResponse { + status: "200"; + body: OpenAIPageableListOfOutput; } -export interface GetAudioTranscriptionAsPlainTextDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & GetAudioTranscriptionAsPlainTextDefaultHeaders; +/** The requested assistant instance. */ +export interface GetAssistant200Response extends HttpResponse { + status: "200"; + body: AssistantOutput; } -/** The request has succeeded. */ -export interface GetAudioTranscriptionAsResponseObject200Response extends HttpResponse { +/** The updated assistant instance. */ +export interface UpdateAssistant200Response extends HttpResponse { status: "200"; - body: AudioTranscriptionOutput; + body: AssistantOutput; } -export interface GetAudioTranscriptionAsResponseObjectDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** Status information about the requested deletion operation. */ +export interface DeleteAssistant200Response extends HttpResponse { + status: "200"; + body: AssistantDeletionStatusOutput; } -export interface GetAudioTranscriptionAsResponseObjectDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & GetAudioTranscriptionAsResponseObjectDefaultHeaders; +/** Information about the newly created thread. */ +export interface CreateThread200Response extends HttpResponse { + status: "200"; + body: AssistantThreadOutput; } -/** The request has succeeded. */ -export interface GetAudioTranslationAsPlainText200Response extends HttpResponse { +/** Information about the requested thread. */ +export interface GetThread200Response extends HttpResponse { status: "200"; - body: string; + body: AssistantThreadOutput; } -export interface GetAudioTranslationAsPlainTextDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** Information about the modified thread. 
 */
+export interface UpdateThread200Response extends HttpResponse {
+  status: "200";
+  body: AssistantThreadOutput;
 }

-export interface GetAudioTranslationAsPlainTextDefaultResponse extends HttpResponse {
-  status: string;
-  body: ErrorResponse;
-  headers: RawHttpHeaders & GetAudioTranslationAsPlainTextDefaultHeaders;
+/** Status information about the requested thread deletion operation. */
+export interface DeleteThread200Response extends HttpResponse {
+  status: "200";
+  body: ThreadDeletionStatusOutput;
+}
+
+/** A representation of the new message. */
+export interface CreateMessage200Response extends HttpResponse {
+  status: "200";
+  body: ThreadMessageOutput;
+}
+
+/** The requested list of messages. */
+export interface ListMessages200Response extends HttpResponse {
+  status: "200";
+  body: OpenAIPageableListOfOutput;
+}
+
+/** A representation of the requested message. */
+export interface GetMessage200Response extends HttpResponse {
+  status: "200";
+  body: ThreadMessageOutput;
+}
+
+/** A representation of the modified message. */
+export interface UpdateMessage200Response extends HttpResponse {
+  status: "200";
+  body: ThreadMessageOutput;
+}
+
+/** Information about the new thread run. */
+export interface CreateRun200Response extends HttpResponse {
+  status: "200";
+  body: ThreadRunOutput;
+}
+
+/** The requested list of thread runs. */
+export interface ListRuns200Response extends HttpResponse {
+  status: "200";
+  body: OpenAIPageableListOfOutput;
+}
+
+/** The requested information about the specified thread run. */
+export interface GetRun200Response extends HttpResponse {
+  status: "200";
+  body: ThreadRunOutput;
+}
+
+/** Information about the modified run. */
+export interface UpdateRun200Response extends HttpResponse {
+  status: "200";
+  body: ThreadRunOutput;
+}
+
+/** Updated information about the run. */
+export interface SubmitToolOutputsToRun200Response extends HttpResponse {
+  status: "200";
+  body: ThreadRunOutput;
+}
+
+/** Updated information about the cancelled run. */
+export interface CancelRun200Response extends HttpResponse {
+  status: "200";
+  body: ThreadRunOutput;
+}
+
+/** Information about the newly created thread and run. */
+export interface CreateThreadAndRun200Response extends HttpResponse {
+  status: "200";
+  body: ThreadRunOutput;
+}
+
+/** Information about the requested run step. */
+export interface GetRunStep200Response extends HttpResponse {
+  status: "200";
+  body: RunStepOutput;
+}
+
+/** The requested list of run steps. */
+export interface ListRunSteps200Response extends HttpResponse {
+  status: "200";
+  body: OpenAIPageableListOfOutput;
+}
+
+/** The requested list of files. */
+export interface ListFiles200Response extends HttpResponse {
+  status: "200";
+  body: FileListResponseOutput;
+}
+
+/** A representation of the uploaded file. */
+export interface UploadFile200Response extends HttpResponse {
+  status: "200";
+  body: OpenAIFileOutput;
 }

 /** The request has succeeded. */
-export interface GetAudioTranslationAsResponseObject200Response extends HttpResponse {
+export interface DeleteFile200Response extends HttpResponse {
   status: "200";
-  body: AudioTranslationOutput;
+  body: FileDeletionStatusOutput;
 }

-export interface GetAudioTranslationAsResponseObjectDefaultHeaders {
-  /** String error code indicating what went wrong. */
-  "x-ms-error-code"?: string;
+/** The request has succeeded. */
+export interface GetFile200Response extends HttpResponse {
+  status: "200";
+  body: OpenAIFileOutput;
+}
+
+/** The request has succeeded.
*/ +export interface GetFileContent200Response extends HttpResponse { + status: "200"; + body: string; } -export interface GetAudioTranslationAsResponseObjectDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & GetAudioTranslationAsResponseObjectDefaultHeaders; +/** The request has succeeded. */ +export interface ListVectorStores200Response extends HttpResponse { + status: "200"; + body: OpenAIPageableListOfOutput; } /** The request has succeeded. */ -export interface GetCompletions200Response extends HttpResponse { +export interface CreateVectorStore200Response extends HttpResponse { status: "200"; - body: CompletionsOutput; + body: VectorStoreOutput; } -export interface GetCompletionsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface GetVectorStore200Response extends HttpResponse { + status: "200"; + body: VectorStoreOutput; } -export interface GetCompletionsDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & GetCompletionsDefaultHeaders; +/** The request has succeeded. */ +export interface ModifyVectorStore200Response extends HttpResponse { + status: "200"; + body: VectorStoreOutput; } /** The request has succeeded. */ -export interface GetChatCompletions200Response extends HttpResponse { +export interface DeleteVectorStore200Response extends HttpResponse { status: "200"; - body: ChatCompletionsOutput; + body: VectorStoreDeletionStatusOutput; } -export interface GetChatCompletionsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface ListVectorStoreFiles200Response extends HttpResponse { + status: "200"; + body: OpenAIPageableListOfOutput; } -export interface GetChatCompletionsDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & GetChatCompletionsDefaultHeaders; +/** The request has succeeded. */ +export interface CreateVectorStoreFile200Response extends HttpResponse { + status: "200"; + body: VectorStoreFileOutput; } /** The request has succeeded. */ -export interface GetImageGenerations200Response extends HttpResponse { +export interface GetVectorStoreFile200Response extends HttpResponse { status: "200"; - body: ImageGenerationsOutput; + body: VectorStoreFileOutput; } -export interface GetImageGenerationsDefaultHeaders { - /** String error code indicating what went wrong. */ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface DeleteVectorStoreFile200Response extends HttpResponse { + status: "200"; + body: VectorStoreFileDeletionStatusOutput; } -export interface GetImageGenerationsDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & GetImageGenerationsDefaultHeaders; +/** The request has succeeded. */ +export interface CreateVectorStoreFileBatch200Response extends HttpResponse { + status: "200"; + body: VectorStoreFileBatchOutput; } /** The request has succeeded. */ -export interface GetEmbeddings200Response extends HttpResponse { +export interface GetVectorStoreFileBatch200Response extends HttpResponse { status: "200"; - body: EmbeddingsOutput; + body: VectorStoreFileBatchOutput; } -export interface GetEmbeddingsDefaultHeaders { - /** String error code indicating what went wrong. 
*/ - "x-ms-error-code"?: string; +/** The request has succeeded. */ +export interface CancelVectorStoreFileBatch200Response extends HttpResponse { + status: "200"; + body: VectorStoreFileBatchOutput; } -export interface GetEmbeddingsDefaultResponse extends HttpResponse { - status: string; - body: ErrorResponse; - headers: RawHttpHeaders & GetEmbeddingsDefaultHeaders; +/** The request has succeeded. */ +export interface ListVectorStoreFileBatchFiles200Response extends HttpResponse { + status: "200"; + body: OpenAIPageableListOfOutput; } diff --git a/sdk/openai/openai/src/utils/deserializeUtil.ts b/sdk/openai/openai/src/utils/deserializeUtil.ts new file mode 100644 index 000000000000..700314acd431 --- /dev/null +++ b/sdk/openai/openai/src/utils/deserializeUtil.ts @@ -0,0 +1,498 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +import { + MessageTextContent, + MessageImageFileContent, + MessageContentUnion, + MessageTextFileCitationAnnotation, + MessageTextFilePathAnnotation, + MessageTextAnnotationUnion, + SubmitToolOutputsAction, + RequiredActionUnion, + RunStepMessageCreationDetails, + RunStepToolCallDetails, + RunStepDetailsUnion, + RunStepCodeInterpreterToolCall, + RunStepFileSearchToolCall, + RunStepToolCallUnion, + RunStepCodeInterpreterImageOutput, + RunStepCodeInterpreterToolCallOutputUnion, + MessageDeltaImageFileContent, + MessageDeltaTextContentObject, + MessageDeltaContentUnion, + MessageDeltaTextFileCitationAnnotationObject, + MessageDeltaTextFilePathAnnotationObject, + MessageDeltaTextAnnotationUnion, + RunStepDeltaMessageCreation, + RunStepDeltaToolCallObject, + RunStepDeltaDetailUnion, + RunStepDeltaFileSearchToolCall, + RunStepDeltaCodeInterpreterToolCall, + RunStepDeltaToolCallUnion, + RunStepDeltaCodeInterpreterImageOutput, + RunStepDeltaCodeInterpreterOutputUnion, +} from "../models/models.js"; +import { + MessageTextContentOutput, + MessageImageFileContentOutput, + MessageContentOutput, + MessageTextFileCitationAnnotationOutput, + MessageTextFilePathAnnotationOutput, + MessageTextAnnotationOutput, + SubmitToolOutputsActionOutput, + RequiredActionOutput, + RunStepMessageCreationDetailsOutput, + RunStepToolCallDetailsOutput, + RunStepDetailsOutput, + RunStepCodeInterpreterToolCallOutput, + RunStepFileSearchToolCallOutput, + RunStepToolCallOutput, + RunStepCodeInterpreterImageOutputOutput, + RunStepCodeInterpreterToolCallOutputOutput, + MessageDeltaImageFileContentOutput, + MessageDeltaTextContentObjectOutput, + MessageDeltaContentOutput, + MessageDeltaTextFileCitationAnnotationObjectOutput, + MessageDeltaTextFilePathAnnotationObjectOutput, + MessageDeltaTextAnnotationOutput, + RunStepDeltaMessageCreationOutput, + RunStepDeltaToolCallObjectOutput, + RunStepDeltaDetailOutput, + RunStepDeltaFileSearchToolCallOutput, + RunStepDeltaCodeInterpreterToolCallOutput, + RunStepDeltaToolCallOutput, + RunStepDeltaCodeInterpreterImageOutputOutput, + RunStepDeltaCodeInterpreterOutputOutput, +} from "../rest/index.js"; + +/** deserialize function for MessageTextContent */ +function deserializeMessageTextContent( + obj: MessageTextContentOutput, +): MessageTextContent { + return { + type: obj["type"], + text: { + value: obj.text["value"], + annotations: obj.text["annotations"].map((p) => + deserializeMessageTextAnnotationUnion(p), + ), + }, + }; +} + +/** deserialize function for MessageImageFileContent */ +function deserializeMessageImageFileContent( + obj: MessageImageFileContentOutput, +): MessageImageFileContent { + return { + type: obj["type"], + 
imageFile: { fileId: obj.image_file["file_id"] }, + }; +} + +/** deserialize function for MessageContentOutput */ +export function deserializeMessageContentUnion( + obj: MessageContentOutput, +): MessageContentUnion { + switch (obj.type) { + case "text": + return deserializeMessageTextContent(obj as MessageTextContent); + case "image_file": + return deserializeMessageImageFileContent(obj as MessageImageFileContent); + default: + return obj; + } +} + +/** deserialize function for MessageTextFileCitationAnnotation */ +function deserializeMessageTextFileCitationAnnotation( + obj: MessageTextFileCitationAnnotationOutput, +): MessageTextFileCitationAnnotation { + return { + type: obj["type"], + text: obj["text"], + fileCitation: { + fileId: obj.file_citation["file_id"], + quote: obj.file_citation["quote"], + }, + startIndex: obj["start_index"], + endIndex: obj["end_index"], + }; +} + +/** deserialize function for MessageTextFilePathAnnotation */ +function deserializeMessageTextFilePathAnnotation( + obj: MessageTextFilePathAnnotationOutput, +): MessageTextFilePathAnnotation { + return { + type: obj["type"], + text: obj["text"], + filePath: { fileId: obj.file_path["file_id"] }, + startIndex: obj["start_index"], + endIndex: obj["end_index"], + }; +} + +/** deserialize function for MessageTextAnnotationOutput */ +export function deserializeMessageTextAnnotationUnion( + obj: MessageTextAnnotationOutput, +): MessageTextAnnotationUnion { + switch (obj.type) { + case "file_citation": + return deserializeMessageTextFileCitationAnnotation( + obj as MessageTextFileCitationAnnotation, + ); + case "file_path": + return deserializeMessageTextFilePathAnnotation( + obj as MessageTextFilePathAnnotation, + ); + default: + return obj; + } +} + +/** deserialize function for SubmitToolOutputsAction */ +function deserializeSubmitToolOutputsAction( + obj: SubmitToolOutputsActionOutput, +): SubmitToolOutputsAction { + return { + type: obj["type"], + submitToolOutputs: { toolCalls: obj.submit_tool_outputs["tool_calls"] }, + }; +} + +/** deserialize function for RequiredActionOutput */ +export function deserializeRequiredActionUnion( + obj: RequiredActionOutput, +): RequiredActionUnion { + switch (obj.type) { + case "submit_tool_outputs": + return deserializeSubmitToolOutputsAction(obj as SubmitToolOutputsAction); + default: + return obj; + } +} + +/** deserialize function for RunStepMessageCreationDetails */ +function deserializeRunStepMessageCreationDetails( + obj: RunStepMessageCreationDetailsOutput, +): RunStepMessageCreationDetails { + return { + type: obj["type"], + messageCreation: { messageId: obj.message_creation["message_id"] }, + }; +} + +/** deserialize function for RunStepToolCallDetails */ +function deserializeRunStepToolCallDetails( + obj: RunStepToolCallDetailsOutput, +): RunStepToolCallDetails { + return { + type: obj["type"], + toolCalls: obj["tool_calls"].map((p) => deserializeRunStepToolCallUnion(p)), + }; +} + +/** deserialize function for RunStepDetailsOutput */ +export function deserializeRunStepDetailsUnion( + obj: RunStepDetailsOutput, +): RunStepDetailsUnion { + switch (obj.type) { + case "message_creation": + return deserializeRunStepMessageCreationDetails( + obj as RunStepMessageCreationDetails, + ); + case "tool_calls": + return deserializeRunStepToolCallDetails(obj as RunStepToolCallDetails); + default: + return obj; + } +} + +/** deserialize function for RunStepCodeInterpreterToolCall */ +function deserializeRunStepCodeInterpreterToolCall( + obj: RunStepCodeInterpreterToolCallOutput, +): 
RunStepCodeInterpreterToolCall {
+  return {
+    type: obj["type"],
+    id: obj["id"],
+    codeInterpreter: {
+      input: obj.code_interpreter["input"],
+      outputs: obj.code_interpreter["outputs"].map((p) =>
+        deserializeRunStepCodeInterpreterToolCallOutputUnion(p),
+      ),
+    },
+  };
+}
+
+/** deserialize function for RunStepFileSearchToolCall */
+function deserializeRunStepFileSearchToolCall(
+  obj: RunStepFileSearchToolCallOutput,
+): RunStepFileSearchToolCall {
+  return { type: obj["type"], id: obj["id"], fileSearch: obj["file_search"] };
+}
+
+/** deserialize function for RunStepToolCallOutput */
+export function deserializeRunStepToolCallUnion(
+  obj: RunStepToolCallOutput,
+): RunStepToolCallUnion {
+  switch (obj.type) {
+    case "code_interpreter":
+      return deserializeRunStepCodeInterpreterToolCall(
+        obj as RunStepCodeInterpreterToolCall,
+      );
+    case "file_search":
+      return deserializeRunStepFileSearchToolCall(
+        obj as RunStepFileSearchToolCall,
+      );
+    default:
+      return obj;
+  }
+}
+
+/** deserialize function for RunStepCodeInterpreterImageOutput */
+function deserializeRunStepCodeInterpreterImageOutput(
+  obj: RunStepCodeInterpreterImageOutputOutput,
+): RunStepCodeInterpreterImageOutput {
+  return { type: obj["type"], image: { fileId: obj.image["file_id"] } };
+}
+
+/** deserialize function for RunStepCodeInterpreterToolCallOutputOutput */
+export function deserializeRunStepCodeInterpreterToolCallOutputUnion(
+  obj: RunStepCodeInterpreterToolCallOutputOutput,
+): RunStepCodeInterpreterToolCallOutputUnion {
+  switch (obj.type) {
+    case "image":
+      return deserializeRunStepCodeInterpreterImageOutput(
+        obj as RunStepCodeInterpreterImageOutput,
+      );
+    default:
+      return obj;
+  }
+}
+
+/** deserialize function for MessageDeltaImageFileContent */
+function deserializeMessageDeltaImageFileContent(
+  obj: MessageDeltaImageFileContentOutput,
+): MessageDeltaImageFileContent {
+  return {
+    index: obj["index"],
+    type: obj["type"],
+    imageFile: !obj.image_file
+      ? undefined
+      : { fileId: obj.image_file?.["file_id"] },
+  };
+}
+
+/** deserialize function for MessageDeltaTextContentObject */
+function deserializeMessageDeltaTextContentObject(
+  obj: MessageDeltaTextContentObjectOutput,
+): MessageDeltaTextContentObject {
+  return {
+    index: obj["index"],
+    type: obj["type"],
+    text: !obj.text
+      ? undefined
+      : {
+          value: obj.text?.["value"],
+          annotations:
+            obj.text?.["annotations"] === undefined
+              ? obj.text?.["annotations"]
+              : obj.text?.["annotations"].map((p) =>
+                  deserializeMessageDeltaTextAnnotationUnion(p),
+                ),
+        },
+  };
+}
+
+/** deserialize function for MessageDeltaContentOutput */
+export function deserializeMessageDeltaContentUnion(
+  obj: MessageDeltaContentOutput,
+): MessageDeltaContentUnion {
+  switch (obj.type) {
+    case "image_file":
+      return deserializeMessageDeltaImageFileContent(
+        obj as MessageDeltaImageFileContent,
+      );
+    case "text":
+      return deserializeMessageDeltaTextContentObject(
+        obj as MessageDeltaTextContentObject,
+      );
+    default:
+      return obj;
+  }
+}
+
+/** deserialize function for MessageDeltaTextFileCitationAnnotationObject */
+function deserializeMessageDeltaTextFileCitationAnnotationObject(
+  obj: MessageDeltaTextFileCitationAnnotationObjectOutput,
+): MessageDeltaTextFileCitationAnnotationObject {
+  return {
+    index: obj["index"],
+    type: obj["type"],
+    fileCitation: !obj.file_citation
+      ? undefined
+      : {
+          fileId: obj.file_citation?.["file_id"],
+          quote: obj.file_citation?.["quote"],
+        },
+    text: obj["text"],
+    startIndex: obj["start_index"],
+    endIndex: obj["end_index"],
+  };
+}
+
+/** deserialize function for MessageDeltaTextFilePathAnnotationObject */
+function deserializeMessageDeltaTextFilePathAnnotationObject(
+  obj: MessageDeltaTextFilePathAnnotationObjectOutput,
+): MessageDeltaTextFilePathAnnotationObject {
+  return {
+    index: obj["index"],
+    type: obj["type"],
+    filePath: !obj.file_path
+      ? undefined
+      : { fileId: obj.file_path?.["file_id"] },
+    startIndex: obj["start_index"],
+    endIndex: obj["end_index"],
+    text: obj["text"],
+  };
+}
+
+/** deserialize function for MessageDeltaTextAnnotationOutput */
+export function deserializeMessageDeltaTextAnnotationUnion(
+  obj: MessageDeltaTextAnnotationOutput,
+): MessageDeltaTextAnnotationUnion {
+  switch (obj.type) {
+    case "file_citation":
+      return deserializeMessageDeltaTextFileCitationAnnotationObject(
+        obj as MessageDeltaTextFileCitationAnnotationObject,
+      );
+    case "file_path":
+      return deserializeMessageDeltaTextFilePathAnnotationObject(
+        obj as MessageDeltaTextFilePathAnnotationObject,
+      );
+    default:
+      return obj;
+  }
+}
+
+/** deserialize function for RunStepDeltaMessageCreation */
+function deserializeRunStepDeltaMessageCreation(
+  obj: RunStepDeltaMessageCreationOutput,
+): RunStepDeltaMessageCreation {
+  return {
+    type: obj["type"],
+    messageCreation: !obj.message_creation
+      ? undefined
+      : { messageId: obj.message_creation?.["message_id"] },
+  };
+}
+
+/** deserialize function for RunStepDeltaToolCallObject */
+function deserializeRunStepDeltaToolCallObject(
+  obj: RunStepDeltaToolCallObjectOutput,
+): RunStepDeltaToolCallObject {
+  return {
+    type: obj["type"],
+    toolCalls:
+      obj["tool_calls"] === undefined
+        ? obj["tool_calls"]
+        : obj["tool_calls"].map((p) => deserializeRunStepDeltaToolCallUnion(p)),
+  };
+}
+
+/** deserialize function for RunStepDeltaDetailOutput */
+export function deserializeRunStepDeltaDetailUnion(
+  obj: RunStepDeltaDetailOutput,
+): RunStepDeltaDetailUnion {
+  switch (obj.type) {
+    case "message_creation":
+      return deserializeRunStepDeltaMessageCreation(
+        obj as RunStepDeltaMessageCreation,
+      );
+    case "tool_calls":
+      return deserializeRunStepDeltaToolCallObject(
+        obj as RunStepDeltaToolCallObject,
+      );
+    default:
+      return obj;
+  }
+}
+
+/** deserialize function for RunStepDeltaFileSearchToolCall */
+function deserializeRunStepDeltaFileSearchToolCall(
+  obj: RunStepDeltaFileSearchToolCallOutput,
+): RunStepDeltaFileSearchToolCall {
+  return {
+    index: obj["index"],
+    id: obj["id"],
+    type: obj["type"],
+    fileSearch: obj["file_search"],
+  };
+}
+
+/** deserialize function for RunStepDeltaCodeInterpreterToolCall */
+function deserializeRunStepDeltaCodeInterpreterToolCall(
+  obj: RunStepDeltaCodeInterpreterToolCallOutput,
+): RunStepDeltaCodeInterpreterToolCall {
+  return {
+    index: obj["index"],
+    id: obj["id"],
+    type: obj["type"],
+    codeInterpreter: !obj.code_interpreter
+      ? undefined
+      : {
+          input: obj.code_interpreter?.["input"],
+          outputs:
+            obj.code_interpreter?.["outputs"] === undefined
+              ? obj.code_interpreter?.["outputs"]
+              : obj.code_interpreter?.["outputs"].map((p) =>
+                  deserializeRunStepDeltaCodeInterpreterOutputUnion(p),
+                ),
+        },
+  };
+}
+
+/** deserialize function for RunStepDeltaToolCallOutput */
+export function deserializeRunStepDeltaToolCallUnion(
+  obj: RunStepDeltaToolCallOutput,
+): RunStepDeltaToolCallUnion {
+  switch (obj.type) {
+    case "file_search":
+      return deserializeRunStepDeltaFileSearchToolCall(
+        obj as RunStepDeltaFileSearchToolCall,
+      );
+    case "code_interpreter":
+      return deserializeRunStepDeltaCodeInterpreterToolCall(
+        obj as RunStepDeltaCodeInterpreterToolCall,
+      );
+    default:
+      return obj;
+  }
+}
+
+/** deserialize function for RunStepDeltaCodeInterpreterImageOutput */
+function deserializeRunStepDeltaCodeInterpreterImageOutput(
+  obj: RunStepDeltaCodeInterpreterImageOutputOutput,
+): RunStepDeltaCodeInterpreterImageOutput {
+  return {
+    index: obj["index"],
+    type: obj["type"],
+    image: !obj.image ? undefined : { fileId: obj.image?.["file_id"] },
+  };
+}
+
+/** deserialize function for RunStepDeltaCodeInterpreterOutputOutput */
+export function deserializeRunStepDeltaCodeInterpreterOutputUnion(
+  obj: RunStepDeltaCodeInterpreterOutputOutput,
+): RunStepDeltaCodeInterpreterOutputUnion {
+  switch (obj.type) {
+    case "image":
+      return deserializeRunStepDeltaCodeInterpreterImageOutput(
+        obj as RunStepDeltaCodeInterpreterImageOutput,
+      );
+    default:
+      return obj;
+  }
+}
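// Note on the additions above: every generated union deserializer switches on
// the wire discriminant (`type`) and falls through to `return obj` for unknown
// variants, so unrecognized service payloads pass through rather than throw.
// A minimal, self-contained sketch of that shape follows — the interfaces are
// simplified stand-ins for the generated *Output (wire) and client types, not
// the SDK's actual definitions:
interface ImageOutputWire {
  type: "image";
  image: { file_id: string };
}

interface ImageOutputClient {
  type: "image";
  image: { fileId: string };
}

// Per-variant helper: renames snake_case wire keys to camelCase client keys.
function deserializeImage(obj: ImageOutputWire): ImageOutputClient {
  return { type: obj.type, image: { fileId: obj.image.file_id } };
}

// Union dispatcher: narrow on `type`; unknown variants pass through unchanged.
function deserializeOutputUnion(
  obj: ImageOutputWire | { type: string },
): ImageOutputClient | { type: string } {
  switch (obj.type) {
    case "image":
      return deserializeImage(obj as ImageOutputWire);
    default:
      return obj;
  }
}

// Usage with a payload parsed from a response body:
const wire: ImageOutputWire = { type: "image", image: { file_id: "file-123" } };
console.log(deserializeOutputUnion(wire)); // { type: "image", image: { fileId: "file-123" } }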
diff --git a/sdk/openai/openai/src/utils/serializeUtil.ts b/sdk/openai/openai/src/utils/serializeUtil.ts
deleted file mode 100644
index bbf2ccb5388d..000000000000
--- a/sdk/openai/openai/src/utils/serializeUtil.ts
+++ /dev/null
@@ -1,431 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-import {
-  ChatRequestUserMessage as ChatRequestUserMessageRest,
-  ChatRequestAssistantMessage as ChatRequestAssistantMessageRest,
-  ChatRequestToolMessage as ChatRequestToolMessageRest,
-  ChatRequestMessage as ChatRequestMessageRest,
-  ChatMessageImageContentItem as ChatMessageImageContentItemRest,
-  ChatMessageContentItem as ChatMessageContentItemRest,
-  AzureSearchChatExtensionConfiguration as AzureSearchChatExtensionConfigurationRest,
-  AzureMachineLearningIndexChatExtensionConfiguration as AzureMachineLearningIndexChatExtensionConfigurationRest,
-  AzureCosmosDBChatExtensionConfiguration as AzureCosmosDBChatExtensionConfigurationRest,
-  ElasticsearchChatExtensionConfiguration as ElasticsearchChatExtensionConfigurationRest,
-  PineconeChatExtensionConfiguration as PineconeChatExtensionConfigurationRest,
-  AzureChatExtensionConfiguration as AzureChatExtensionConfigurationRest,
-  OnYourDataConnectionStringAuthenticationOptions as OnYourDataConnectionStringAuthenticationOptionsRest,
-  OnYourDataKeyAndKeyIdAuthenticationOptions as OnYourDataKeyAndKeyIdAuthenticationOptionsRest,
-  OnYourDataEncodedApiKeyAuthenticationOptions as OnYourDataEncodedApiKeyAuthenticationOptionsRest,
-  OnYourDataAccessTokenAuthenticationOptions as OnYourDataAccessTokenAuthenticationOptionsRest,
-  OnYourDataUserAssignedManagedIdentityAuthenticationOptions as OnYourDataUserAssignedManagedIdentityAuthenticationOptionsRest,
-  OnYourDataAuthenticationOptions as OnYourDataAuthenticationOptionsRest,
-  OnYourDataEndpointVectorizationSource as OnYourDataEndpointVectorizationSourceRest,
-  OnYourDataDeploymentNameVectorizationSource as OnYourDataDeploymentNameVectorizationSourceRest,
-  OnYourDataModelIdVectorizationSource as OnYourDataModelIdVectorizationSourceRest,
-  OnYourDataVectorizationSource as OnYourDataVectorizationSourceRest,
-} from "../rest/index.js";
-import {
-  ChatRequestUserMessage,
-  ChatRequestAssistantMessage,
-  ChatRequestToolMessage,
-  ChatRequestMessageUnion,
-  ChatMessageImageContentItem,
-  ChatMessageContentItemUnion,
-  AzureSearchChatExtensionConfiguration,
-  AzureMachineLearningIndexChatExtensionConfiguration,
-  AzureCosmosDBChatExtensionConfiguration,
-  ElasticsearchChatExtensionConfiguration,
-  PineconeChatExtensionConfiguration,
-  AzureChatExtensionConfigurationUnion,
-  OnYourDataConnectionStringAuthenticationOptions,
-  OnYourDataKeyAndKeyIdAuthenticationOptions,
-  OnYourDataEncodedApiKeyAuthenticationOptions,
-  OnYourDataAccessTokenAuthenticationOptions,
-  OnYourDataUserAssignedManagedIdentityAuthenticationOptions,
-  OnYourDataAuthenticationOptionsUnion,
-  OnYourDataEndpointVectorizationSource,
-  OnYourDataDeploymentNameVectorizationSource,
-  OnYourDataModelIdVectorizationSource,
-  OnYourDataVectorizationSourceUnion,
-} from "../models/models.js";
-import { snakeCaseKeys } from "../api/util.js";
-
-/** serialize function for ChatRequestUserMessage */
-function serializeChatRequestUserMessage(obj: ChatRequestUserMessage): ChatRequestUserMessageRest {
-  return {
-    role: obj["role"],
-    content:
-      typeof obj["content"] === "string"
-        ? obj["content"]
-        : obj["content"].map(serializeChatRequestContentItemUnion),
-    name: obj["name"],
-  };
-}
-
-/** serialize function for ChatMessageImageContentItem */
-function serializeChatRequestContentItemUnion(
-  obj: ChatMessageContentItemUnion,
-): ChatMessageContentItemRest {
-  switch (obj.type) {
-    case "image_url":
-      return serializeChatMessageImageContentItem(obj as ChatMessageImageContentItem);
-    default:
-      return obj;
-  }
-}
-/** serialize function for ChatRequestAssistantMessage */
-function serializeChatRequestAssistantMessage(
-  obj: ChatRequestAssistantMessage,
-): ChatRequestAssistantMessageRest {
-  if (obj.content === undefined) {
-    obj.content = null;
-  }
-  const { functionCall, toolCalls, ...rest } = obj;
-  return {
-    ...snakeCaseKeys(rest),
-    ...(!toolCalls || toolCalls.length === 0 ? {} : { tool_calls: toolCalls }),
-    ...(functionCall ? { function_call: functionCall } : {}),
-  };
-}
-
-/** serialize function for ChatRequestToolMessage */
-function serializeChatRequestToolMessage(obj: ChatRequestToolMessage): ChatRequestToolMessageRest {
-  return {
-    role: obj["role"],
-    content: obj["content"],
-    tool_call_id: obj["toolCallId"],
-  };
-}
-
-/** serialize function for ChatRequestMessageUnion */
-export function serializeChatRequestMessageUnion(
-  obj: ChatRequestMessageUnion,
-): ChatRequestMessageRest {
-  switch (obj.role) {
-    case "user":
-      return serializeChatRequestUserMessage(obj as ChatRequestUserMessage);
-    case "assistant":
-      return serializeChatRequestAssistantMessage(obj as ChatRequestAssistantMessage);
-    case "tool":
-      return serializeChatRequestToolMessage(obj as ChatRequestToolMessage);
-    default:
-      return obj;
-  }
-}
-
-/** serialize function for ChatMessageImageContentItem */
-function serializeChatMessageImageContentItem(
-  obj: ChatMessageImageContentItem,
-): ChatMessageImageContentItemRest {
-  return {
-    type: obj["type"],
-    image_url: { url: obj.imageUrl["url"], detail: obj.imageUrl["detail"] },
-  };
-}
-
-/** serialize function for ChatMessageContentItemUnion */
-export function serializeChatMessageContentItemUnion(
-  obj: ChatMessageContentItemUnion,
-): ChatMessageContentItemRest {
-  switch (obj.type) {
-    case "image_url":
-      return serializeChatMessageImageContentItem(obj as ChatMessageImageContentItem);
-    default:
-      return obj;
-  }
-}
-
-/** serialize function for AzureSearchChatExtensionConfiguration */
-function serializeAzureSearchChatExtensionConfiguration(
-  obj: AzureSearchChatExtensionConfiguration,
-): AzureSearchChatExtensionConfigurationRest {
-  return {
-    type: obj["type"],
-    parameters: {
-      authentication: !obj.authentication
-        ? obj.authentication
-        : serializeOnYourDataAuthenticationOptionsUnion(obj.authentication),
-      top_n_documents: obj["topNDocuments"],
-      in_scope: obj["inScope"],
-      strictness: obj["strictness"],
-      role_information: obj["roleInformation"],
-      endpoint: obj["endpoint"],
-      index_name: obj["indexName"],
-      fields_mapping: !obj.fieldsMapping
-        ? undefined
-        : {
-            title_field: obj.fieldsMapping?.["titleField"],
-            url_field: obj.fieldsMapping?.["urlField"],
-            filepath_field: obj.fieldsMapping?.["filepathField"],
-            content_fields: obj.fieldsMapping?.["contentFields"],
-            content_fields_separator: obj.fieldsMapping?.["contentFieldsSeparator"],
-            vector_fields: obj.fieldsMapping?.["vectorFields"],
-            image_vector_fields: obj.fieldsMapping?.["imageVectorFields"],
-          },
-      query_type: obj["queryType"],
-      semantic_configuration: obj["semanticConfiguration"],
-      filter: obj["filter"],
-      embedding_dependency: !obj.embeddingDependency
-        ? obj.embeddingDependency
-        : serializeOnYourDataVectorizationSourceUnion(obj.embeddingDependency),
-    },
-  };
-}
-
-/** serialize function for AzureMachineLearningIndexChatExtensionConfiguration */
-function serializeAzureMachineLearningIndexChatExtensionConfiguration(
-  obj: AzureMachineLearningIndexChatExtensionConfiguration,
-): AzureMachineLearningIndexChatExtensionConfigurationRest {
-  return {
-    type: obj["type"],
-    parameters: {
-      authentication: !obj.authentication
-        ? obj.authentication
-        : serializeOnYourDataAuthenticationOptionsUnion(obj.authentication),
-      top_n_documents: obj["topNDocuments"],
-      in_scope: obj["inScope"],
-      strictness: obj["strictness"],
-      role_information: obj["roleInformation"],
-      project_resource_id: obj["projectResourceId"],
-      name: obj["name"],
-      version: obj["version"],
-      filter: obj["filter"],
-    },
-  };
-}
-
-/** serialize function for AzureCosmosDBChatExtensionConfiguration */
-function serializeAzureCosmosDBChatExtensionConfiguration(
-  obj: AzureCosmosDBChatExtensionConfiguration,
-): AzureCosmosDBChatExtensionConfigurationRest {
-  return {
-    type: obj["type"],
-    parameters: {
-      authentication: !obj.authentication
-        ? obj.authentication
-        : serializeOnYourDataAuthenticationOptionsUnion(obj.authentication),
-      top_n_documents: obj["topNDocuments"],
-      in_scope: obj["inScope"],
-      strictness: obj["strictness"],
-      role_information: obj["roleInformation"],
-      database_name: obj["databaseName"],
-      container_name: obj["containerName"],
-      index_name: obj["indexName"],
-      fields_mapping: {
-        title_field: obj.fieldsMapping["titleField"],
-        url_field: obj.fieldsMapping["urlField"],
-        filepath_field: obj.fieldsMapping["filepathField"],
-        content_fields: obj.fieldsMapping["contentFields"],
-        content_fields_separator: obj.fieldsMapping["contentFieldsSeparator"],
-        vector_fields: obj.fieldsMapping["vectorFields"],
-      },
-      embedding_dependency: serializeOnYourDataVectorizationSourceUnion(obj.embeddingDependency),
-    },
-  };
-}
-
-/** serialize function for ElasticsearchChatExtensionConfiguration */
-function serializeElasticsearchChatExtensionConfiguration(
-  obj: ElasticsearchChatExtensionConfiguration,
-): ElasticsearchChatExtensionConfigurationRest {
-  return {
-    type: obj["type"],
-    parameters: {
-      authentication: !obj.authentication
-        ? obj.authentication
-        : serializeOnYourDataAuthenticationOptionsUnion(obj.authentication),
-      top_n_documents: obj["topNDocuments"],
-      in_scope: obj["inScope"],
-      strictness: obj["strictness"],
-      role_information: obj["roleInformation"],
-      endpoint: obj["endpoint"],
-      index_name: obj["indexName"],
-      fields_mapping: !obj.fieldsMapping
-        ? undefined
-        : {
-            title_field: obj.fieldsMapping?.["titleField"],
-            url_field: obj.fieldsMapping?.["urlField"],
-            filepath_field: obj.fieldsMapping?.["filepathField"],
-            content_fields: obj.fieldsMapping?.["contentFields"],
-            content_fields_separator: obj.fieldsMapping?.["contentFieldsSeparator"],
-            vector_fields: obj.fieldsMapping?.["vectorFields"],
-          },
-      query_type: obj["queryType"],
-      embedding_dependency: !obj.embeddingDependency
-        ? obj.embeddingDependency
-        : serializeOnYourDataVectorizationSourceUnion(obj.embeddingDependency),
-    },
-  };
-}
-
-/** serialize function for PineconeChatExtensionConfiguration */
-function serializePineconeChatExtensionConfiguration(
-  obj: PineconeChatExtensionConfiguration,
-): PineconeChatExtensionConfigurationRest {
-  return {
-    type: obj["type"],
-    parameters: {
-      authentication: !obj.authentication
-        ? obj.authentication
-        : serializeOnYourDataAuthenticationOptionsUnion(obj.authentication),
-      top_n_documents: obj["topNDocuments"],
-      in_scope: obj["inScope"],
-      strictness: obj["strictness"],
-      role_information: obj["roleInformation"],
-      environment: obj["environment"],
-      index_name: obj["indexName"],
-      fields_mapping: {
-        title_field: obj.fieldsMapping["titleField"],
-        url_field: obj.fieldsMapping["urlField"],
-        filepath_field: obj.fieldsMapping["filepathField"],
-        content_fields: obj.fieldsMapping["contentFields"],
-        content_fields_separator: obj.fieldsMapping["contentFieldsSeparator"],
-      },
-      embedding_dependency: serializeOnYourDataVectorizationSourceUnion(obj.embeddingDependency),
-    },
-  };
-}
-
-/** serialize function for AzureChatExtensionConfigurationUnion */
-export function serializeAzureChatExtensionConfigurationUnion(
-  obj: AzureChatExtensionConfigurationUnion,
-): AzureChatExtensionConfigurationRest {
-  switch (obj.type) {
-    case "azure_search":
-      return serializeAzureSearchChatExtensionConfiguration(
-        obj as AzureSearchChatExtensionConfiguration,
-      );
-    case "azure_ml_index":
-      return serializeAzureMachineLearningIndexChatExtensionConfiguration(
-        obj as AzureMachineLearningIndexChatExtensionConfiguration,
-      );
-    case "azure_cosmos_db":
-      return serializeAzureCosmosDBChatExtensionConfiguration(
-        obj as AzureCosmosDBChatExtensionConfiguration,
-      );
-    case "elasticsearch":
-      return serializeElasticsearchChatExtensionConfiguration(
-        obj as ElasticsearchChatExtensionConfiguration,
-      );
-    case "pinecone":
-      return serializePineconeChatExtensionConfiguration(obj as PineconeChatExtensionConfiguration);
-    default:
-      return obj;
-  }
-}
-
-/** serialize function for OnYourDataConnectionStringAuthenticationOptions */
-function serializeOnYourDataConnectionStringAuthenticationOptions(
-  obj: OnYourDataConnectionStringAuthenticationOptions,
-): OnYourDataConnectionStringAuthenticationOptionsRest {
-  return { type: obj["type"], connection_string: obj["connectionString"] };
-}
-
-/** serialize function for OnYourDataKeyAndKeyIdAuthenticationOptions */
-function serializeOnYourDataKeyAndKeyIdAuthenticationOptions(
-  obj: OnYourDataKeyAndKeyIdAuthenticationOptions,
-): OnYourDataKeyAndKeyIdAuthenticationOptionsRest {
-  return { type: obj["type"], key: obj["key"], key_id: obj["keyId"] };
-}
-
-/** serialize function for OnYourDataEncodedApiKeyAuthenticationOptions */
-function serializeOnYourDataEncodedApiKeyAuthenticationOptions(
-  obj: OnYourDataEncodedApiKeyAuthenticationOptions,
-): OnYourDataEncodedApiKeyAuthenticationOptionsRest {
-  return { type: obj["type"], encoded_api_key: obj["encodedApiKey"] };
-}
-
-/** serialize function for OnYourDataAccessTokenAuthenticationOptions */
-function serializeOnYourDataAccessTokenAuthenticationOptions(
-  obj: OnYourDataAccessTokenAuthenticationOptions,
-): OnYourDataAccessTokenAuthenticationOptionsRest {
-  return { type: obj["type"], access_token: obj["accessToken"] };
-}
-
-/** serialize function for OnYourDataUserAssignedManagedIdentityAuthenticationOptions */
-function serializeOnYourDataUserAssignedManagedIdentityAuthenticationOptions(
-  obj: OnYourDataUserAssignedManagedIdentityAuthenticationOptions,
-): OnYourDataUserAssignedManagedIdentityAuthenticationOptionsRest {
-  return {
-    type: obj["type"],
-    managed_identity_resource_id: obj["managedIdentityResourceId"],
-  };
-}
-
-/** serialize function for OnYourDataAuthenticationOptionsUnion */
-export function serializeOnYourDataAuthenticationOptionsUnion(
-  obj: OnYourDataAuthenticationOptionsUnion,
-): OnYourDataAuthenticationOptionsRest {
-  switch (obj.type) {
-    case "connection_string":
-      return serializeOnYourDataConnectionStringAuthenticationOptions(
-        obj as OnYourDataConnectionStringAuthenticationOptions,
-      );
-    case "key_and_key_id":
-      return serializeOnYourDataKeyAndKeyIdAuthenticationOptions(
-        obj as OnYourDataKeyAndKeyIdAuthenticationOptions,
-      );
-    case "encoded_api_key":
-      return serializeOnYourDataEncodedApiKeyAuthenticationOptions(
-        obj as OnYourDataEncodedApiKeyAuthenticationOptions,
-      );
-    case "access_token":
-      return serializeOnYourDataAccessTokenAuthenticationOptions(
-        obj as OnYourDataAccessTokenAuthenticationOptions,
-      );
-    case "user_assigned_managed_identity":
-      return serializeOnYourDataUserAssignedManagedIdentityAuthenticationOptions(
-        obj as OnYourDataUserAssignedManagedIdentityAuthenticationOptions,
-      );
-    default:
-      return obj;
-  }
-}
-
-/** serialize function for OnYourDataEndpointVectorizationSource */
-function serializeOnYourDataEndpointVectorizationSource(
-  obj: OnYourDataEndpointVectorizationSource,
-): OnYourDataEndpointVectorizationSourceRest {
-  return {
-    type: obj["type"],
-    endpoint: obj["endpoint"],
-    authentication: serializeOnYourDataAuthenticationOptionsUnion(obj.authentication),
-  };
-}
-
-/** serialize function for OnYourDataDeploymentNameVectorizationSource */
-function serializeOnYourDataDeploymentNameVectorizationSource(
-  obj: OnYourDataDeploymentNameVectorizationSource,
-): OnYourDataDeploymentNameVectorizationSourceRest {
-  return { type: obj["type"], deployment_name: obj["deploymentName"] };
-}
-
-/** serialize function for OnYourDataModelIdVectorizationSource */
-function serializeOnYourDataModelIdVectorizationSource(
-  obj: OnYourDataModelIdVectorizationSource,
-): OnYourDataModelIdVectorizationSourceRest {
-  return { type: obj["type"], model_id: obj["modelId"] };
-}
-
-/** serialize function for OnYourDataVectorizationSourceUnion */
-export function serializeOnYourDataVectorizationSourceUnion(
-  obj: OnYourDataVectorizationSourceUnion,
-): OnYourDataVectorizationSourceRest {
-  switch (obj.type) {
-    case "endpoint":
-      return serializeOnYourDataEndpointVectorizationSource(
-        obj as OnYourDataEndpointVectorizationSource,
-      );
-    case "deployment_name":
-      return serializeOnYourDataDeploymentNameVectorizationSource(
-        obj as OnYourDataDeploymentNameVectorizationSource,
-      );
-    case "model_id":
-      return serializeOnYourDataModelIdVectorizationSource(
-        obj as OnYourDataModelIdVectorizationSource,
-      );
-    default:
-      return obj;
-  }
-}
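// The deleted file above leaned on a `snakeCaseKeys` helper imported from
// "../api/util.js" to convert client camelCase keys to wire snake_case keys.
// Below is a hedged sketch of what such a shallow key-mapping helper could
// look like — an assumption inferred from call sites like
// `...snakeCaseKeys(rest)`, not the SDK's actual implementation:
function snakeCaseKeysSketch(
  obj: Record<string, unknown>,
): Record<string, unknown> {
  const result: Record<string, unknown> = {};
  for (const [key, value] of Object.entries(obj)) {
    // e.g. "toolCallId" -> "tool_call_id"
    const snakeKey = key.replace(/[A-Z]/g, (ch) => `_${ch.toLowerCase()}`);
    result[snakeKey] = value;
  }
  return result;
}

// Usage mirroring serializeChatRequestToolMessage above:
console.log(snakeCaseKeysSketch({ role: "tool", toolCallId: "call_abc" }));
// { role: "tool", tool_call_id: "call_abc" }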
diff --git a/sdk/openai/openai/tsconfig.json b/sdk/openai/openai/tsconfig.json
index 233a0a863d4a..335dccb6436d 100644
--- a/sdk/openai/openai/tsconfig.json
+++ b/sdk/openai/openai/tsconfig.json
@@ -3,12 +3,25 @@
   "compilerOptions": {
     "outDir": "./dist-esm",
     "declarationDir": "./types",
-    "paths": { "@azure/openai": ["./src/index.js"] },
+    "paths": {
+      "@azure/openai": [
+        "./src/index.js"
+      ]
+    },
     "module": "NodeNext",
     "moduleResolution": "NodeNext",
-    "lib": ["esnext", "dom"],
+    "lib": [
+      "esnext",
+      "dom"
+    ],
     "rootDir": "."
   },
-  "ts-node": { "esm": true },
-  "include": ["src/**/*.ts", "test/**/*.ts", "samples-dev/**/*.ts"]
-}
+  "ts-node": {
+    "esm": true
+  },
+  "include": [
+    "src/**/*.ts",
+    "test/**/*.ts",
+    "samples-dev/**/*.ts"
+  ]
+}
\ No newline at end of file
diff --git a/sdk/openai/openai/tsp-location.yaml b/sdk/openai/openai/tsp-location.yaml
index 0630e0013e84..c93182113de7 100644
--- a/sdk/openai/openai/tsp-location.yaml
+++ b/sdk/openai/openai/tsp-location.yaml
@@ -1,3 +1,5 @@
-directory: specification/cognitiveservices/OpenAI.Inference
-commit: 64813b901ca3dc81a9ce3c7b5c6d7430c78aa8a5
+commit: c397ebac46fe04bf5896faa2bea5888928efe8d2
+additionalDirectories: []
+directory: specification/ai/OpenAI.Assistants
 repo: Azure/azure-rest-api-specs
+
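// The tsp-location.yaml change above pins code generation to the
// OpenAI.Assistants TypeSpec at a specific commit of Azure/azure-rest-api-specs.
// As a rough illustration of what that pin identifies, the fields resolve to a
// browsable GitHub tree URL — the interface and URL construction here are
// illustrative only, not part of the actual regeneration tooling:
interface TspLocation {
  repo: string;
  commit: string;
  directory: string;
  additionalDirectories: string[];
}

function specSourceUrl(loc: TspLocation): string {
  return `https://github.com/${loc.repo}/tree/${loc.commit}/${loc.directory}`;
}

console.log(
  specSourceUrl({
    repo: "Azure/azure-rest-api-specs",
    commit: "c397ebac46fe04bf5896faa2bea5888928efe8d2",
    directory: "specification/ai/OpenAI.Assistants",
    additionalDirectories: [],
  }),
);
// https://github.com/Azure/azure-rest-api-specs/tree/c397ebac46fe04bf5896faa2bea5888928efe8d2/specification/ai/OpenAI.Assistants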