From edbc23cd73c981b2df2f1dd43a1d34447aa30cf1 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 9 Jun 2023 01:16:06 +0000 Subject: [PATCH 01/66] ci: regenerated with OpenAPI Doc 1.2.0, Speakeay CLI 1.45.2 --- .gitignore | 2 + README.md | 88 +- RELEASES.md | 10 +- USAGE.md | 18 +- docs/gpt/README.md | 8 + docs/openai/README.md | 910 ++ files.gen | 11 + gen.yaml | 9 +- package-lock.json | 8241 ++++++++++++++--- package.json | 11 +- src/internal/utils/headers.ts | 23 +- src/internal/utils/pathparams.ts | 30 +- src/internal/utils/queryparams.ts | 141 +- src/internal/utils/requestbody.ts | 130 +- src/internal/utils/security.ts | 2 +- src/internal/utils/utils.ts | 115 +- src/sdk/models/operations/cancelfinetune.ts | 41 +- src/sdk/models/operations/createanswer.ts | 22 +- .../models/operations/createchatcompletion.ts | 22 +- .../models/operations/createclassification.ts | 22 +- src/sdk/models/operations/createcompletion.ts | 22 +- src/sdk/models/operations/createedit.ts | 22 +- src/sdk/models/operations/createembedding.ts | 22 +- src/sdk/models/operations/createfile.ts | 23 +- src/sdk/models/operations/createfinetune.ts | 23 +- src/sdk/models/operations/createimage.ts | 23 +- src/sdk/models/operations/createimageedit.ts | 23 +- .../models/operations/createimagevariation.ts | 23 +- src/sdk/models/operations/createmoderation.ts | 22 +- src/sdk/models/operations/createsearch.ts | 38 +- .../models/operations/createtranscription.ts | 22 +- .../models/operations/createtranslation.ts | 22 +- src/sdk/models/operations/deletefile.ts | 34 +- src/sdk/models/operations/deletemodel.ts | 34 +- src/sdk/models/operations/downloadfile.ts | 34 +- src/sdk/models/operations/listengines.ts | 22 +- src/sdk/models/operations/listfiles.ts | 22 +- .../models/operations/listfinetuneevents.ts | 72 +- src/sdk/models/operations/listfinetunes.ts | 22 +- src/sdk/models/operations/listmodels.ts | 22 +- src/sdk/models/operations/retrieveengine.ts | 41 +- src/sdk/models/operations/retrievefile.ts | 
35 +- src/sdk/models/operations/retrievefinetune.ts | 41 +- src/sdk/models/operations/retrievemodel.ts | 35 +- .../shared/chatcompletionrequestmessage.ts | 44 +- .../shared/chatcompletionresponsemessage.ts | 32 +- src/sdk/models/shared/createanswerrequest.ts | 266 +- src/sdk/models/shared/createanswerresponse.ts | 60 +- .../shared/createchatcompletionrequest.ts | 168 +- .../shared/createchatcompletionresponse.ts | 78 +- .../shared/createclassificationrequest.ts | 148 +- .../shared/createclassificationresponse.ts | 66 +- .../models/shared/createcompletionrequest.ts | 354 +- .../models/shared/createcompletionresponse.ts | 124 +- src/sdk/models/shared/createeditrequest.ts | 72 +- src/sdk/models/shared/createeditresponse.ts | 108 +- .../models/shared/createembeddingrequest.ts | 30 +- .../models/shared/createembeddingresponse.ts | 64 +- src/sdk/models/shared/createfilerequest.ts | 48 +- .../models/shared/createfinetunerequest.ts | 346 +- .../models/shared/createimageeditrequest.ts | 74 +- src/sdk/models/shared/createimagerequest.ts | 76 +- .../shared/createimagevariationrequest.ts | 34 +- .../models/shared/createmoderationrequest.ts | 34 +- .../models/shared/createmoderationresponse.ts | 134 +- src/sdk/models/shared/createsearchrequest.ts | 110 +- src/sdk/models/shared/createsearchresponse.ts | 38 +- .../shared/createtranscriptionrequest.ts | 104 +- .../shared/createtranscriptionresponse.ts | 6 +- .../models/shared/createtranslationrequest.ts | 96 +- .../shared/createtranslationresponse.ts | 6 +- src/sdk/models/shared/deletefileresponse.ts | 18 +- src/sdk/models/shared/deletemodelresponse.ts | 18 +- src/sdk/models/shared/engine.ts | 27 + src/sdk/models/shared/finetune.ts | 69 + src/sdk/models/shared/finetuneevent.ts | 24 + src/sdk/models/shared/imagesresponse.ts | 30 + src/sdk/models/shared/index.ts | 6 + src/sdk/models/shared/listenginesresponse.ts | 16 +- src/sdk/models/shared/listfilesresponse.ts | 16 +- .../shared/listfinetuneeventsresponse.ts | 16 +- 
.../models/shared/listfinetunesresponse.ts | 16 +- src/sdk/models/shared/listmodelsresponse.ts | 16 +- src/sdk/models/shared/model.ts | 27 + src/sdk/models/shared/openaifile.ts | 43 + src/sdk/openai.ts | 3263 +++---- src/sdk/sdk.ts | 97 +- src/sdk/types/index.ts | 5 + src/sdk/types/rfcdate.ts | 35 + 89 files changed, 12072 insertions(+), 4945 deletions(-) create mode 100755 .gitignore create mode 100755 docs/gpt/README.md create mode 100755 docs/openai/README.md create mode 100755 src/sdk/models/shared/engine.ts create mode 100755 src/sdk/models/shared/finetune.ts create mode 100755 src/sdk/models/shared/finetuneevent.ts create mode 100755 src/sdk/models/shared/imagesresponse.ts create mode 100755 src/sdk/models/shared/model.ts create mode 100755 src/sdk/models/shared/openaifile.ts create mode 100755 src/sdk/types/index.ts create mode 100755 src/sdk/types/rfcdate.ts diff --git a/.gitignore b/.gitignore new file mode 100755 index 0000000..1eae0cf --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +dist/ +node_modules/ diff --git a/README.md b/README.md index 313916a..7691dac 100755 --- a/README.md +++ b/README.md @@ -40,39 +40,35 @@ Authorization: Bearer YOUR_API_KEY ## SDK Example Usage ```typescript -import { - CancelFineTuneRequest, - CancelFineTuneResponse -} from "@speakeasy-api/openai/dist/sdk/models/operations"; - -import { AxiosError } from "axios"; import { Gpt } from "@speakeasy-api/openai"; +import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + const sdk = new Gpt(); -const req: CancelFineTuneRequest = { +sdk.openAI.cancelFineTune({ fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", -}; - -sdk.openAI.cancelFineTune(req).then((res: CancelFineTuneResponse | AxiosError) => { - // handle response +}).then((res: CancelFineTuneResponse) => { + if (res.statusCode == 200) { + // handle response + } }); ``` -## SDK Available Operations +## Available Resources and Operations -### openAI +### [openAI](docs/openai/README.md) -* 
`cancelFineTune` - Immediately cancel a fine-tune job. +* [cancelFineTune](docs/openai/README.md#cancelfinetune) - Immediately cancel a fine-tune job. -* `createAnswer` - Answers the specified question using the provided documents and examples. +* [~~createAnswer~~](docs/openai/README.md#createanswer) - Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). - -* `createChatCompletion` - Creates a completion for the chat message -* `createClassification` - Classifies the specified `query` using provided examples. + :warning: **Deprecated** +* [createChatCompletion](docs/openai/README.md#createchatcompletion) - Creates a completion for the chat message +* [~~createClassification~~](docs/openai/README.md#createclassification) - Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples @@ -81,47 +77,47 @@ are combined with the query to construct a prompt to produce the final label via Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. + :warning: **Deprecated** +* [createCompletion](docs/openai/README.md#createcompletion) - Creates a completion for the provided prompt and parameters +* [createEdit](docs/openai/README.md#createedit) - Creates a new edit for the provided input, instruction, and parameters. +* [createEmbedding](docs/openai/README.md#createembedding) - Creates an embedding vector representing the input text. 
+* [createFile](docs/openai/README.md#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. -* `createCompletion` - Creates a completion for the provided prompt and parameters -* `createEdit` - Creates a new edit for the provided input, instruction, and parameters. -* `createEmbedding` - Creates an embedding vector representing the input text. -* `createFile` - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. - -* `createFineTune` - Creates a job that fine-tunes a specified model from a given dataset. +* [createFineTune](docs/openai/README.md#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning) -* `createImage` - Creates an image given a prompt. -* `createImageEdit` - Creates an edited or extended image given an original image and a prompt. -* `createImageVariation` - Creates a variation of a given image. -* `createModeration` - Classifies if text violates OpenAI's Content Policy -* `createSearch` - The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. +* [createImage](docs/openai/README.md#createimage) - Creates an image given a prompt. +* [createImageEdit](docs/openai/README.md#createimageedit) - Creates an edited or extended image given an original image and a prompt. 
+* [createImageVariation](docs/openai/README.md#createimagevariation) - Creates a variation of a given image. +* [createModeration](docs/openai/README.md#createmoderation) - Classifies if text violates OpenAI's Content Policy +* [~~createSearch~~](docs/openai/README.md#createsearch) - The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. - -* `createTranscription` - Transcribes audio into the input language. -* `createTranslation` - Translates audio into into English. -* `deleteFile` - Delete a file. -* `deleteModel` - Delete a fine-tuned model. You must have the Owner role in your organization. -* `downloadFile` - Returns the contents of the specified file -* `listEngines` - Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. -* `listFiles` - Returns a list of files that belong to the user's organization. -* `listFineTuneEvents` - Get fine-grained status updates for a fine-tune job. - -* `listFineTunes` - List your organization's fine-tuning jobs - -* `listModels` - Lists the currently available models, and provides basic information about each one such as the owner and availability. -* `retrieveEngine` - Retrieves a model instance, providing basic information about it such as the owner and availability. 
-* `retrieveFile` - Returns information about a specific file. -* `retrieveFineTune` - Gets info about the fine-tune job. + :warning: **Deprecated** +* [createTranscription](docs/openai/README.md#createtranscription) - Transcribes audio into the input language. +* [createTranslation](docs/openai/README.md#createtranslation) - Translates audio into into English. +* [deleteFile](docs/openai/README.md#deletefile) - Delete a file. +* [deleteModel](docs/openai/README.md#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization. +* [downloadFile](docs/openai/README.md#downloadfile) - Returns the contents of the specified file +* [~~listEngines~~](docs/openai/README.md#listengines) - Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. :warning: **Deprecated** +* [listFiles](docs/openai/README.md#listfiles) - Returns a list of files that belong to the user's organization. +* [listFineTuneEvents](docs/openai/README.md#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. + +* [listFineTunes](docs/openai/README.md#listfinetunes) - List your organization's fine-tuning jobs + +* [listModels](docs/openai/README.md#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. +* [~~retrieveEngine~~](docs/openai/README.md#retrieveengine) - Retrieves a model instance, providing basic information about it such as the owner and availability. :warning: **Deprecated** +* [retrieveFile](docs/openai/README.md#retrievefile) - Returns information about a specific file. +* [retrieveFineTune](docs/openai/README.md#retrievefinetune) - Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning) -* `retrieveModel` - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
+* [retrieveModel](docs/openai/README.md#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. ### SDK Generated by [Speakeasy](https://docs.speakeasyapi.dev/docs/using-speakeasy/client-sdks) diff --git a/RELEASES.md b/RELEASES.md index 799f006..24deca2 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -148,4 +148,12 @@ Based on: - OpenAPI Doc 1.2.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.19.2 (2.16.5) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v1.9.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.9.2 - . \ No newline at end of file +- [NPM v1.9.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.9.2 - . + +## 2023-06-09 01:15:43 +### Changes +Based on: +- OpenAPI Doc 1.2.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.45.2 (2.37.2) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v1.10.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.10.0 - . 
\ No newline at end of file diff --git a/USAGE.md b/USAGE.md index c25d8ad..1eedb9f 100755 --- a/USAGE.md +++ b/USAGE.md @@ -1,20 +1,16 @@ ```typescript -import { - CancelFineTuneRequest, - CancelFineTuneResponse -} from "@speakeasy-api/openai/dist/sdk/models/operations"; - -import { AxiosError } from "axios"; import { Gpt } from "@speakeasy-api/openai"; +import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + const sdk = new Gpt(); -const req: CancelFineTuneRequest = { +sdk.openAI.cancelFineTune({ fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", -}; - -sdk.openAI.cancelFineTune(req).then((res: CancelFineTuneResponse | AxiosError) => { - // handle response +}).then((res: CancelFineTuneResponse) => { + if (res.statusCode == 200) { + // handle response + } }); ``` \ No newline at end of file diff --git a/docs/gpt/README.md b/docs/gpt/README.md new file mode 100755 index 0000000..411338d --- /dev/null +++ b/docs/gpt/README.md @@ -0,0 +1,8 @@ +# Gpt SDK + +## Overview + +OpenAI API: APIs for sampling from and fine-tuning language models + +### Available Operations + diff --git a/docs/openai/README.md b/docs/openai/README.md new file mode 100755 index 0000000..3cb8865 --- /dev/null +++ b/docs/openai/README.md @@ -0,0 +1,910 @@ +# openAI + +## Overview + +The OpenAI REST API + +### Available Operations + +* [cancelFineTune](#cancelfinetune) - Immediately cancel a fine-tune job. + +* [~~createAnswer~~](#createanswer) - Answers the specified question using the provided documents and examples. + +The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). 
+ :warning: **Deprecated** +* [createChatCompletion](#createchatcompletion) - Creates a completion for the chat message +* [~~createClassification~~](#createclassification) - Classifies the specified `query` using provided examples. + +The endpoint first [searches](/docs/api-reference/searches) over the labeled examples +to select the ones most relevant for the particular query. Then, the relevant examples +are combined with the query to construct a prompt to produce the final label via the +[completions](/docs/api-reference/completions) endpoint. + +Labeled examples can be provided via an uploaded `file`, or explicitly listed in the +request using the `examples` parameter for quick tests and small scale use cases. + :warning: **Deprecated** +* [createCompletion](#createcompletion) - Creates a completion for the provided prompt and parameters +* [createEdit](#createedit) - Creates a new edit for the provided input, instruction, and parameters. +* [createEmbedding](#createembedding) - Creates an embedding vector representing the input text. +* [createFile](#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. + +* [createFineTune](#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. + +Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + +[Learn more about Fine-tuning](/docs/guides/fine-tuning) + +* [createImage](#createimage) - Creates an image given a prompt. +* [createImageEdit](#createimageedit) - Creates an edited or extended image given an original image and a prompt. +* [createImageVariation](#createimagevariation) - Creates a variation of a given image. 
+* [createModeration](#createmoderation) - Classifies if text violates OpenAI's Content Policy +* [~~createSearch~~](#createsearch) - The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. + +To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. + +The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. + :warning: **Deprecated** +* [createTranscription](#createtranscription) - Transcribes audio into the input language. +* [createTranslation](#createtranslation) - Translates audio into into English. +* [deleteFile](#deletefile) - Delete a file. +* [deleteModel](#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization. +* [downloadFile](#downloadfile) - Returns the contents of the specified file +* [~~listEngines~~](#listengines) - Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. :warning: **Deprecated** +* [listFiles](#listfiles) - Returns a list of files that belong to the user's organization. +* [listFineTuneEvents](#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. + +* [listFineTunes](#listfinetunes) - List your organization's fine-tuning jobs + +* [listModels](#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. 
+* [~~retrieveEngine~~](#retrieveengine) - Retrieves a model instance, providing basic information about it such as the owner and availability. :warning: **Deprecated** +* [retrieveFile](#retrievefile) - Returns information about a specific file. +* [retrieveFineTune](#retrievefinetune) - Gets info about the fine-tune job. + +[Learn more about Fine-tuning](/docs/guides/fine-tuning) + +* [retrieveModel](#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + +## cancelFineTune + +Immediately cancel a fine-tune job. + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.cancelFineTune({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", +}).then((res: CancelFineTuneResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## ~~createAnswer~~ + +Answers the specified question using the provided documents and examples. + +The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). + + +> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateAnswerResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createAnswer({ + documents: [ + "provident", + "distinctio", + "quibusdam", + ], + examples: [ + [ + "corrupti", + "illum", + "vel", + "error", + ], + [ + "suscipit", + "iure", + "magnam", + ], + [ + "ipsa", + "delectus", + "tempora", + "suscipit", + ], + ], + examplesContext: "Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. border.", + expand: [ + "minus", + "placeat", + ], + file: "voluptatum", + logitBias: "iusto", + logprobs: 568045, + maxRerank: 392785, + maxTokens: 925597, + model: "temporibus", + n: 71036, + question: "What is the capital of Japan?", + returnMetadata: "quis", + returnPrompt: false, + searchModel: "veritatis", + stop: [ + "["\n"]", + ], + temperature: 3682.41, + user: "repellendus", +}).then((res: CreateAnswerResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## createChatCompletion + +Creates a completion for the chat message + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateChatCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { ChatCompletionRequestMessageRole, ChatCompletionResponseMessageRole } from "@speakeasy-api/openai/dist/sdk/models/shared"; + +const sdk = new Gpt(); + +sdk.openAI.createChatCompletion({ + frequencyPenalty: 9571.56, + logitBias: { + "odit": "at", + "at": "maiores", + "molestiae": "quod", + "quod": "esse", + }, + maxTokens: 520478, + messages: [ + { + content: "dolorum", + name: "Antoinette Nikolaus", + role: ChatCompletionRequestMessageRole.User, + }, + { + content: "hic", + name: "Everett Breitenberg", + role: ChatCompletionRequestMessageRole.System, + }, + { + content: "qui", + name: "Jonathon Klocko", + role: 
ChatCompletionRequestMessageRole.System, + }, + { + content: "perferendis", + name: "Faye Cormier", + role: ChatCompletionRequestMessageRole.User, + }, + ], + model: "laboriosam", + n: 1, + presencePenalty: 9437.49, + stop: [ + "in", + "corporis", + "iste", + ], + stream: false, + temperature: 1, + topP: 1, + user: "iure", +}).then((res: CreateChatCompletionResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## ~~createClassification~~ + +Classifies the specified `query` using provided examples. + +The endpoint first [searches](/docs/api-reference/searches) over the labeled examples +to select the ones most relevant for the particular query. Then, the relevant examples +are combined with the query to construct a prompt to produce the final label via the +[completions](/docs/api-reference/completions) endpoint. + +Labeled examples can be provided via an uploaded `file`, or explicitly listed in the +request using the `examples` parameter for quick tests and small scale use cases. + + +> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateClassificationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createClassification({ + examples: [ + [ + "architecto", + "ipsa", + "reiciendis", + ], + [ + "mollitia", + "laborum", + "dolores", + ], + [ + "corporis", + ], + [ + "nobis", + ], + ], + expand: "enim", + file: "omnis", + labels: [ + "minima", + "excepturi", + ], + logitBias: "accusantium", + logprobs: "iure", + maxExamples: 634274, + model: "doloribus", + query: "The plot is not very attractive.", + returnMetadata: "sapiente", + returnPrompt: "architecto", + searchModel: "mollitia", + temperature: 0, + user: "dolorem", +}).then((res: CreateClassificationResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## createCompletion + +Creates a completion for the provided prompt and parameters + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createCompletion({ + bestOf: 635059, + echo: false, + frequencyPenalty: 1613.09, + logitBias: { + "mollitia": "occaecati", + "numquam": "commodi", + "quam": "molestiae", + "velit": "error", + }, + logprobs: 158969, + maxTokens: 16, + model: "quis", + n: 1, + presencePenalty: 1103.75, + prompt: [ + 317202, + 138183, + 778346, + ], + stop: " +", + stream: false, + suffix: "test.", + temperature: 1, + topP: 1, + user: "user-1234", +}).then((res: CreateCompletionResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## createEdit + +Creates a new edit for the provided input, instruction, and parameters. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createEdit({ + input: "What day of the wek is it?", + instruction: "Fix the spelling mistakes.", + model: "tenetur", + n: 1, + temperature: 1, + topP: 1, +}).then((res: CreateEditResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## createEmbedding + +Creates an embedding vector representing the input text. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateEmbeddingResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createEmbedding({ + input: [ + "This is a test.", + "This is a test.", + "This is a test.", + ], + model: "possimus", + user: "aut", +}).then((res: CreateEmbeddingResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## createFile + +Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createFile({ + file: { + content: "quasi".encode(), + file: "error", + }, + purpose: "temporibus", +}).then((res: CreateFileResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## createFineTune + +Creates a job that fine-tunes a specified model from a given dataset. + +Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
+ +[Learn more about Fine-tuning](/docs/guides/fine-tuning) + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createFineTune({ + batchSize: 673660, + classificationBetas: [ + 9719.45, + ], + classificationNClasses: 976460, + classificationPositiveClass: "vero", + computeClassificationMetrics: false, + learningRateMultiplier: 4686.51, + model: "praesentium", + nEpochs: 976762, + promptLossWeight: 557.14, + suffix: "omnis", + trainingFile: "file-ajSREls59WBbvgSzJSVWxMCB", + validationFile: "file-XjSREls59WBbvgSzJSVWxMCa", +}).then((res: CreateFineTuneResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## createImage + +Creates an image given a prompt. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateImageResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { CreateImageRequestResponseFormat, CreateImageRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; + +const sdk = new Gpt(); + +sdk.openAI.createImage({ + n: 1, + prompt: "A cute baby sea otter", + responseFormat: CreateImageRequestResponseFormat.Url, + size: CreateImageRequestSize.OneThousandAndTwentyFourx1024, + user: "voluptate", +}).then((res: CreateImageResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## createImageEdit + +Creates an edited or extended image given an original image and a prompt. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateImageEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createImageEdit({ + image: { + content: "cum".encode(), + image: "perferendis", + }, + mask: { + content: "doloremque".encode(), + mask: "reprehenderit", + }, + n: "ut", + prompt: "A cute baby sea otter wearing a beret", + responseFormat: "maiores", + size: "dicta", + user: "corporis", +}).then((res: CreateImageEditResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## createImageVariation + +Creates a variation of a given image. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateImageVariationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createImageVariation({ + image: { + content: "dolore".encode(), + image: "iusto", + }, + n: "dicta", + responseFormat: "harum", + size: "enim", + user: "accusamus", +}).then((res: CreateImageVariationResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## createModeration + +Classifies if text violates OpenAI's Content Policy + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateModerationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createModeration({ + input: "I want to kill them.", + model: "text-moderation-stable", +}).then((res: CreateModerationResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## ~~createSearch~~ + +The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. 
+
+To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores.
+
+The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
+
+
+> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible.
+
+### Example Usage
+
+```typescript
+import { Gpt } from "@speakeasy-api/openai";
+import { CreateSearchResponse } from "@speakeasy-api/openai/dist/sdk/models/operations";
+
+const sdk = new Gpt();
+
+sdk.openAI.createSearch({
+  createSearchRequest: {
+    documents: [
+      "quae",
+      "ipsum",
+      "quidem",
+      "molestias",
+    ],
+    file: "excepturi",
+    maxRerank: 865103,
+    query: "the president",
+    returnMetadata: false,
+    user: "modi",
+  },
+  engineId: "davinci",
+}).then((res: CreateSearchResponse) => {
+  if (res.statusCode == 200) {
+    // handle response
+  }
+});
+```
+
+## createTranscription
+
+Transcribes audio into the input language.
+
+### Example Usage
+
+```typescript
+import { Gpt } from "@speakeasy-api/openai";
+import { CreateTranscriptionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations";
+
+const sdk = new Gpt();
+
+sdk.openAI.createTranscription({
+  file: {
+    content: "praesentium".encode(),
+    file: "rem",
+  },
+  language: "voluptates",
+  model: "quasi",
+  prompt: "repudiandae",
+  responseFormat: "sint",
+  temperature: 831.12,
+}).then((res: CreateTranscriptionResponse) => {
+  if (res.statusCode == 200) {
+    // handle response
+  }
+});
+```
+
+## createTranslation
+
+Translates audio into English.
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateTranslationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createTranslation({ + file: { + content: "itaque".encode(), + file: "incidunt", + }, + model: "enim", + prompt: "consequatur", + responseFormat: "est", + temperature: 8423.42, +}).then((res: CreateTranslationResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## deleteFile + +Delete a file. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { DeleteFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.deleteFile({ + fileId: "explicabo", +}).then((res: DeleteFileResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## deleteModel + +Delete a fine-tuned model. You must have the Owner role in your organization. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { DeleteModelResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.deleteModel({ + model: "curie:ft-acmeco-2021-03-03-21-44-20", +}).then((res: DeleteModelResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## downloadFile + +Returns the contents of the specified file + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { DownloadFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.downloadFile({ + fileId: "deserunt", +}).then((res: DownloadFileResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## ~~listEngines~~ + +Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. 
+ +> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListEnginesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listEngines().then((res: ListEnginesResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## listFiles + +Returns a list of files that belong to the user's organization. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListFilesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listFiles().then((res: ListFilesResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## listFineTuneEvents + +Get fine-grained status updates for a fine-tune job. + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListFineTuneEventsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listFineTuneEvents({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + stream: false, +}).then((res: ListFineTuneEventsResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## listFineTunes + +List your organization's fine-tuning jobs + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListFineTunesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listFineTunes().then((res: ListFineTunesResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## listModels + +Lists the currently available models, and provides basic information about each one such as the owner and availability. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListModelsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listModels().then((res: ListModelsResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## ~~retrieveEngine~~ + +Retrieves a model instance, providing basic information about it such as the owner and availability. + +> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { RetrieveEngineResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.retrieveEngine({ + engineId: "davinci", +}).then((res: RetrieveEngineResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## retrieveFile + +Returns information about a specific file. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { RetrieveFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.retrieveFile({ + fileId: "distinctio", +}).then((res: RetrieveFileResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## retrieveFineTune + +Gets info about the fine-tune job. 
+ +[Learn more about Fine-tuning](/docs/guides/fine-tuning) + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { RetrieveFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.retrieveFineTune({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", +}).then((res: RetrieveFineTuneResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +## retrieveModel + +Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { RetrieveModelResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.retrieveModel({ + model: "text-davinci-001", +}).then((res: RetrieveModelResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` diff --git a/files.gen b/files.gen index e54d544..dba9da9 100755 --- a/files.gen +++ b/files.gen @@ -1,6 +1,7 @@ src/sdk/openai.ts src/sdk/sdk.ts .eslintrc.yml +.gitignore jest.config.js package-lock.json package.json @@ -15,6 +16,8 @@ src/internal/utils/retries.ts src/internal/utils/security.ts src/internal/utils/utils.ts src/sdk/index.ts +src/sdk/types/index.ts +src/sdk/types/rfcdate.ts tsconfig.json src/sdk/models/operations/cancelfinetune.ts src/sdk/models/operations/createanswer.ts @@ -45,6 +48,9 @@ src/sdk/models/operations/retrievefile.ts src/sdk/models/operations/retrievefinetune.ts src/sdk/models/operations/retrievemodel.ts src/sdk/models/operations/index.ts +src/sdk/models/shared/finetune.ts +src/sdk/models/shared/openaifile.ts +src/sdk/models/shared/finetuneevent.ts src/sdk/models/shared/createanswerresponse.ts src/sdk/models/shared/createanswerrequest.ts src/sdk/models/shared/createchatcompletionresponse.ts @@ -61,6 +67,7 @@ src/sdk/models/shared/createembeddingresponse.ts 
src/sdk/models/shared/createembeddingrequest.ts src/sdk/models/shared/createfilerequest.ts src/sdk/models/shared/createfinetunerequest.ts +src/sdk/models/shared/imagesresponse.ts src/sdk/models/shared/createimagerequest.ts src/sdk/models/shared/createimageeditrequest.ts src/sdk/models/shared/createimagevariationrequest.ts @@ -75,9 +82,13 @@ src/sdk/models/shared/createtranslationrequest.ts src/sdk/models/shared/deletefileresponse.ts src/sdk/models/shared/deletemodelresponse.ts src/sdk/models/shared/listenginesresponse.ts +src/sdk/models/shared/engine.ts src/sdk/models/shared/listfilesresponse.ts src/sdk/models/shared/listfinetuneeventsresponse.ts src/sdk/models/shared/listfinetunesresponse.ts src/sdk/models/shared/listmodelsresponse.ts +src/sdk/models/shared/model.ts src/sdk/models/shared/index.ts +docs/gpt/README.md +docs/openai/README.md USAGE.md \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index b37a596..d843212 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,14 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 5399f7767be93d4a4b8cecb9bbc687b3 docVersion: 1.2.0 - speakeasyVersion: 1.19.2 - generationVersion: 2.16.5 + speakeasyVersion: 1.45.2 + generationVersion: 2.37.2 generation: - telemetryEnabled: false sdkClassName: gpt sdkFlattening: true singleTagPerOp: false + telemetryEnabled: false typescript: - version: 1.9.2 + version: 1.10.0 author: speakeasy-openai + maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 61b416a..6ddccf4 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "1.9.2", + "version": "1.10.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "1.9.2", + "version": "1.10.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", @@ -15,717 +15,688 @@ }, "devDependencies": { "@types/node": "^18.11.5", + "@types/jsonpath": "^0.2.0", 
"@typescript-eslint/eslint-plugin": "^5.56.0", "@typescript-eslint/parser": "^5.56.0", "eslint": "^8.36.0", "typescript": "^4.8.4" } }, - "node_modules/@eslint-community/eslint-utils": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", - "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "node_modules/@ampproject/remapping": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", "dev": true, "dependencies": { - "eslint-visitor-keys": "^3.3.0" + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.21.4", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.21.4.tgz", + "integrity": "sha512-LYvhNKfwWSPpocw8GI7gpK2nq3HSDuEPC/uSYaALSJu9xjsalaaYFOq0Pwt5KmVqwEbZlDu81aLXwBOmD/Fv9g==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.18.6" }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + "engines": { + "node": ">=6.9.0" } }, - "node_modules/@eslint-community/regexpp": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.4.1.tgz", - "integrity": "sha512-BISJ6ZE4xQsuL/FmsyRaiffpq977bMlsKfGHTQrOGFErfByxIe6iZTxPf/00Zon9b9a7iUykfQwejN3s2ZW/Bw==", + "node_modules/@babel/compat-data": { + "version": "7.22.3", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.3.tgz", + "integrity": "sha512-aNtko9OPOwVESUFp3MZfD8Uzxl7JzSeJpd7npIoxCasU37PFbAQRpKglkaKwlHOyeJdrREpo8TW8ldrkYWwvIQ==", "dev": true, "engines": { - "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + "node": ">=6.9.0" } }, - 
"node_modules/@eslint/eslintrc": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.0.1.tgz", - "integrity": "sha512-eFRmABvW2E5Ho6f5fHLqgena46rOj7r7OKHYfLElqcBfGFHHpjBhivyi5+jOEQuSpdc/1phIZJlbC2te+tZNIw==", + "node_modules/@babel/core": { + "version": "7.22.1", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.1.tgz", + "integrity": "sha512-Hkqu7J4ynysSXxmAahpN1jjRwVJ+NdpraFLIWflgjpVob3KNyK3/tIUc7Q7szed8WMp0JNa7Qtd1E9Oo22F9gA==", "dev": true, "dependencies": { - "ajv": "^6.12.4", - "debug": "^4.3.2", - "espree": "^9.5.0", - "globals": "^13.19.0", - "ignore": "^5.2.0", - "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", - "minimatch": "^3.1.2", - "strip-json-comments": "^3.1.1" + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.21.4", + "@babel/generator": "^7.22.0", + "@babel/helper-compilation-targets": "^7.22.1", + "@babel/helper-module-transforms": "^7.22.1", + "@babel/helpers": "^7.22.0", + "@babel/parser": "^7.22.0", + "@babel/template": "^7.21.9", + "@babel/traverse": "^7.22.1", + "@babel/types": "^7.22.0", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.2", + "semver": "^6.3.0" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": ">=6.9.0" }, "funding": { - "url": "https://opencollective.com/eslint" + "type": "opencollective", + "url": "https://opencollective.com/babel" } }, - "node_modules/@eslint/js": { - "version": "8.36.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.36.0.tgz", - "integrity": "sha512-lxJ9R5ygVm8ZWgYdUweoq5ownDlJ4upvoWmO4eLxBYHdMo+vZ/Rx0EN6MbKWDJOSUGrqJy2Gt+Dyv/VKml0fjg==", + "node_modules/@babel/core/node_modules/convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", + "dev": true + 
}, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", "dev": true, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "bin": { + "semver": "bin/semver.js" } }, - "node_modules/@humanwhocodes/config-array": { - "version": "0.11.8", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz", - "integrity": "sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==", + "node_modules/@babel/generator": { + "version": "7.22.3", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.3.tgz", + "integrity": "sha512-C17MW4wlk//ES/CJDL51kPNwl+qiBQyN7b9SKyVp11BLGFeSPoVaHrv+MNt8jwQFhQWowW88z1eeBx3pFz9v8A==", "dev": true, "dependencies": { - "@humanwhocodes/object-schema": "^1.2.1", - "debug": "^4.1.1", - "minimatch": "^3.0.5" + "@babel/types": "^7.22.3", + "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", + "jsesc": "^2.5.1" }, "engines": { - "node": ">=10.10.0" + "node": ">=6.9.0" } }, - "node_modules/@humanwhocodes/module-importer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", - "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "node_modules/@babel/helper-compilation-targets": { + "version": "7.22.1", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.1.tgz", + "integrity": "sha512-Rqx13UM3yVB5q0D/KwQ8+SPfX/+Rnsy1Lw1k/UwOC4KC6qrzIQoY3lYnBu5EHKBlEHHcj0M0W8ltPSkD8rqfsQ==", "dev": true, + "dependencies": { + "@babel/compat-data": "^7.22.0", + "@babel/helper-validator-option": "^7.21.0", + "browserslist": "^4.21.3", + "lru-cache": "^5.1.1", 
+ "semver": "^6.3.0" + }, "engines": { - "node": ">=12.22" + "node": ">=6.9.0" }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/@humanwhocodes/object-schema": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", - "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", + "node_modules/@babel/helper-compilation-targets/node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", "dev": true }, - "node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "node_modules/@babel/helper-environment-visitor": { + "version": "7.22.1", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.1.tgz", + "integrity": 
"sha512-Z2tgopurB/kTbidvzeBrc2To3PUP/9i5MUe+fU6QJCQDyPwSH2oRapkLw3KGECDYSjhQZCNxEvNvZlLw8JjGwA==", "dev": true, - "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - }, "engines": { - "node": ">= 8" + "node": ">=6.9.0" } }, - "node_modules/@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "node_modules/@babel/helper-function-name": { + "version": "7.21.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.21.0.tgz", + "integrity": "sha512-HfK1aMRanKHpxemaY2gqBmL04iAPOPRj7DxtNbiDOrJK+gdwkiNRVpCpUJYbUT+aZyemKN8brqTOxzCaG6ExRg==", "dev": true, + "dependencies": { + "@babel/template": "^7.20.7", + "@babel/types": "^7.21.0" + }, "engines": { - "node": ">= 8" + "node": ">=6.9.0" } }, - "node_modules/@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "node_modules/@babel/helper-hoist-variables": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", + "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", "dev": true, "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" + "@babel/types": "^7.18.6" }, "engines": { - "node": ">= 8" + "node": ">=6.9.0" } }, - "node_modules/@types/json-schema": { - "version": "7.0.11", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", - "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", - "dev": true - }, - "node_modules/@types/node": { - "version": 
"18.11.9", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.9.tgz", - "integrity": "sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg==", - "dev": true - }, - "node_modules/@types/semver": { - "version": "7.3.13", - "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.3.13.tgz", - "integrity": "sha512-21cFJr9z3g5dW8B0CVI9g2O9beqaThGQ6ZFBqHfwhzLDKUxaqTIy3vnfah/UPkfOiF2pLq+tGz+W8RyCskuslw==", - "dev": true - }, - "node_modules/@typescript-eslint/eslint-plugin": { - "version": "5.56.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.56.0.tgz", - "integrity": "sha512-ZNW37Ccl3oMZkzxrYDUX4o7cnuPgU+YrcaYXzsRtLB16I1FR5SHMqga3zGsaSliZADCWo2v8qHWqAYIj8nWCCg==", + "node_modules/@babel/helper-module-imports": { + "version": "7.21.4", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.21.4.tgz", + "integrity": "sha512-orajc5T2PsRYUN3ZryCEFeMDYwyw09c/pZeaQEZPH0MpKzSvn3e0uXsDBu3k03VI+9DBiRo+l22BfKTpKwa/Wg==", "dev": true, "dependencies": { - "@eslint-community/regexpp": "^4.4.0", - "@typescript-eslint/scope-manager": "5.56.0", - "@typescript-eslint/type-utils": "5.56.0", - "@typescript-eslint/utils": "5.56.0", - "debug": "^4.3.4", - "grapheme-splitter": "^1.0.4", - "ignore": "^5.2.0", - "natural-compare-lite": "^1.4.0", - "semver": "^7.3.7", - "tsutils": "^3.21.0" + "@babel/types": "^7.21.4" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "@typescript-eslint/parser": "^5.0.0", - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "node": ">=6.9.0" } }, - "node_modules/@typescript-eslint/parser": { - "version": "5.56.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.56.0.tgz", 
- "integrity": "sha512-sn1OZmBxUsgxMmR8a8U5QM/Wl+tyqlH//jTqCg8daTAmhAk26L2PFhcqPLlYBhYUJMZJK276qLXlHN3a83o2cg==", + "node_modules/@babel/helper-module-transforms": { + "version": "7.22.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.1.tgz", + "integrity": "sha512-dxAe9E7ySDGbQdCVOY/4+UcD8M9ZFqZcZhSPsPacvCG4M+9lwtDDQfI2EoaSvmf7W/8yCBkGU0m7Pvt1ru3UZw==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "5.56.0", - "@typescript-eslint/types": "5.56.0", - "@typescript-eslint/typescript-estree": "5.56.0", - "debug": "^4.3.4" + "@babel/helper-environment-visitor": "^7.22.1", + "@babel/helper-module-imports": "^7.21.4", + "@babel/helper-simple-access": "^7.21.5", + "@babel/helper-split-export-declaration": "^7.18.6", + "@babel/helper-validator-identifier": "^7.19.1", + "@babel/template": "^7.21.9", + "@babel/traverse": "^7.22.1", + "@babel/types": "^7.22.0" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "node": ">=6.9.0" } }, - "node_modules/@typescript-eslint/scope-manager": { - "version": "5.56.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.56.0.tgz", - "integrity": "sha512-jGYKyt+iBakD0SA5Ww8vFqGpoV2asSjwt60Gl6YcO8ksQ8s2HlUEyHBMSa38bdLopYqGf7EYQMUIGdT/Luw+sw==", + "node_modules/@babel/helper-plugin-utils": { + "version": "7.21.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.21.5.tgz", + "integrity": "sha512-0WDaIlXKOX/3KfBK/dwP1oQGiPh6rjMkT7HIRv7i5RR2VUMwrx5ZL0dwBkKx7+SW1zwNdgjHd34IMk5ZjTeHVg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.21.5", + 
"resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.21.5.tgz", + "integrity": "sha512-ENPDAMC1wAjR0uaCUwliBdiSl1KBJAVnMTzXqi64c2MG8MPR6ii4qf7bSXDqSFbr4W6W028/rf5ivoHop5/mkg==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.56.0", - "@typescript-eslint/visitor-keys": "5.56.0" + "@babel/types": "^7.21.5" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" + "node": ">=6.9.0" } }, - "node_modules/@typescript-eslint/type-utils": { - "version": "5.56.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.56.0.tgz", - "integrity": "sha512-8WxgOgJjWRy6m4xg9KoSHPzBNZeQbGlQOH7l2QEhQID/+YseaFxg5J/DLwWSsi9Axj4e/cCiKx7PVzOq38tY4A==", + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", + "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "5.56.0", - "@typescript-eslint/utils": "5.56.0", - "debug": "^4.3.4", - "tsutils": "^3.21.0" + "@babel/types": "^7.18.6" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "*" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "node": ">=6.9.0" } }, - "node_modules/@typescript-eslint/types": { - "version": "5.56.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.56.0.tgz", - "integrity": "sha512-JyAzbTJcIyhuUhogmiu+t79AkdnqgPUEsxMTMc/dCZczGMJQh1MK2wgrju++yMN6AWroVAy2jxyPcPr3SWCq5w==", + "node_modules/@babel/helper-string-parser": { + "version": 
"7.21.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.21.5.tgz", + "integrity": "sha512-5pTUx3hAJaZIdW99sJ6ZUUgWq/Y+Hja7TowEnLNMm1VivRgZQL3vpBY3qUACVsvw+yQU6+YgfBVmcbLaZtrA1w==", "dev": true, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" + "node": ">=6.9.0" } }, - "node_modules/@typescript-eslint/typescript-estree": { - "version": "5.56.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.56.0.tgz", - "integrity": "sha512-41CH/GncsLXOJi0jb74SnC7jVPWeVJ0pxQj8bOjH1h2O26jXN3YHKDT1ejkVz5YeTEQPeLCCRY0U2r68tfNOcg==", + "node_modules/@babel/helper-validator-identifier": { + "version": "7.19.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", + "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.21.0", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.21.0.tgz", + "integrity": "sha512-rmL/B8/f0mKS2baE9ZpyTcTavvEuWhTTW8amjzXNvYG4AwBsqTLikfXsEofsJEfKHf+HQVQbFOHy6o+4cnC/fQ==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.22.3", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.3.tgz", + "integrity": "sha512-jBJ7jWblbgr7r6wYZHMdIqKc73ycaTcCaWRq4/2LpuPHcx7xMlZvpGQkOYc9HeSjn6rcx15CPlgVcBtZ4WZJ2w==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.56.0", - "@typescript-eslint/visitor-keys": "5.56.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "semver": "^7.3.7", - "tsutils": "^3.21.0" + "@babel/template": "^7.21.9", + 
"@babel/traverse": "^7.22.1", + "@babel/types": "^7.22.3" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "node": ">=6.9.0" } }, - "node_modules/@typescript-eslint/utils": { - "version": "5.56.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.56.0.tgz", - "integrity": "sha512-XhZDVdLnUJNtbzaJeDSCIYaM+Tgr59gZGbFuELgF7m0IY03PlciidS7UQNKLE0+WpUTn1GlycEr6Ivb/afjbhA==", + "node_modules/@babel/highlight": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", + "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", "dev": true, "dependencies": { - "@eslint-community/eslint-utils": "^4.2.0", - "@types/json-schema": "^7.0.9", - "@types/semver": "^7.3.12", - "@typescript-eslint/scope-manager": "5.56.0", - "@typescript-eslint/types": "5.56.0", - "@typescript-eslint/typescript-estree": "5.56.0", - "eslint-scope": "^5.1.1", - "semver": "^7.3.7" + "@babel/helper-validator-identifier": "^7.18.6", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + "node": ">=6.9.0" } }, - "node_modules/@typescript-eslint/visitor-keys": { - "version": "5.56.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.56.0.tgz", - "integrity": "sha512-1mFdED7u5bZpX6Xxf5N9U2c18sb+8EvU3tyOIj6LQZ5OOvnmj8BVeNNP603OFPm5KkS1a7IvCIcwrdHXaEMG/Q==", + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + 
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.56.0", - "eslint-visitor-keys": "^3.3.0" + "color-convert": "^1.9.0" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" + "node": ">=4" } }, - "node_modules/acorn": { - "version": "8.8.2", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", - "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", "dev": true, - "bin": { - "acorn": "bin/acorn" + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" }, "engines": { - "node": ">=0.4.0" + "node": ">=4" } }, - "node_modules/acorn-jsx": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", - "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", "dev": true, - "peerDependencies": { - "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + "dependencies": { + "color-name": "1.1.3" } }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": 
"sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" + "engines": { + "node": ">=0.8.0" } }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", "dev": true, "engines": { - "node": ">=8" + "node": ">=4" } }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + 
"integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", "dev": true, "dependencies": { - "color-convert": "^2.0.1" + "has-flag": "^3.0.0" }, "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" + "node": ">=4" } }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "node_modules/array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "node_modules/@babel/parser": { + "version": "7.22.4", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.4.tgz", + "integrity": "sha512-VLLsx06XkEYqBtE5YGPwfSGwfrjnyPP5oiGty3S8pQLFDFLaS8VwWSIxkTXpcvr5zeYLE6+MBNl2npl/YnfofA==", "dev": true, + "bin": { + "parser": "bin/babel-parser.js" + }, "engines": { - "node": ">=8" + "node": ">=6.0.0" } }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" - }, - "node_modules/axios": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.1.3.tgz", - "integrity": "sha512-00tXVRwKx/FZr/IDVFt4C+f9FYairX517WoGCL6dpOntqLkZofjhu43F/Xl44UOpqa+9sLFDrG/XAnFsUYgkDA==", + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": 
true, "dependencies": { - "follow-redirects": "^1.15.0", - "form-data": "^4.0.0", - "proxy-from-env": "^1.1.0" + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", "dev": true, "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + 
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", "dev": true, "dependencies": { - "fill-range": "^7.0.1" + "@babel/helper-plugin-utils": "^7.10.4" }, - "engines": { - "node": ">=8" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", "dev": true, - "engines": { - "node": ">=6" + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.21.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.21.4.tgz", + "integrity": "sha512-5hewiLct5OKyh6PLKEYaFclcqtIgCb6bmELouxjF6up5q3Sov7rOayW4RwhbaBL0dit8rA80GNfY+UuDp2mBbQ==", "dev": true, "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" + "@babel/helper-plugin-utils": "^7.20.2" }, "engines": { - "node": ">=10" + "node": ">=6.9.0" }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/class-transformer": { - 
"version": "0.5.1", - "resolved": "https://registry.npmjs.org/class-transformer/-/class-transformer-0.5.1.tgz", - "integrity": "sha512-SQa1Ws6hUbfC98vKGxZH3KFY0Y1lm5Zm0SY8XX9zbK7FJCyVEac3ATW0RIpwzW+oOfmHE5PMPufDG9hCfoEOMw==" + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", "dev": true, "dependencies": { - "color-name": "~1.1.4" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": ">=7.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + 
"integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, "dependencies": { - "delayed-stream": "~1.0.0" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": ">= 0.8" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } }, - "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": 
"sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", "dev": true, "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": ">= 8" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", "dev": true, "dependencies": { - "ms": "2.1.2" + "@babel/helper-plugin-utils": "^7.14.5" }, "engines": { - "node": ">=6.0" + "node": ">=6.9.0" }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/deep-is": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", - "dev": true - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + 
"node_modules/@babel/plugin-syntax-typescript": { + "version": "7.21.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.21.4.tgz", + "integrity": "sha512-xz0D39NvhQn4t4RNsHmDnnsaQizIlUkdtYvLs8La1BlfjQ6JEwxkJGeqJMW2tAXx+q6H+WFuUTXNdYVpEya0YA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.20.2" + }, "engines": { - "node": ">=0.4.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "node_modules/@babel/template": { + "version": "7.21.9", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.21.9.tgz", + "integrity": "sha512-MK0X5k8NKOuWRamiEfc3KEJiHMTkGZNUjzMipqCGDDc6ijRl/B7RGSKVGncu4Ro/HdyzzY6cmoXuKI2Gffk7vQ==", "dev": true, "dependencies": { - "path-type": "^4.0.0" + "@babel/code-frame": "^7.21.4", + "@babel/parser": "^7.21.9", + "@babel/types": "^7.21.5" }, "engines": { - "node": ">=8" + "node": ">=6.9.0" } }, - "node_modules/doctrine": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "node_modules/@babel/traverse": { + "version": "7.22.4", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.4.tgz", + "integrity": "sha512-Tn1pDsjIcI+JcLKq1AVlZEr4226gpuAQTsLMorsYg9tuS/kG7nuwwJ4AB8jfQuEgb/COBwR/DqJxmoiYFu5/rQ==", "dev": true, "dependencies": { - "esutils": "^2.0.2" + "@babel/code-frame": "^7.21.4", + "@babel/generator": "^7.22.3", + "@babel/helper-environment-visitor": "^7.22.1", + "@babel/helper-function-name": "^7.21.0", + "@babel/helper-hoist-variables": "^7.18.6", + "@babel/helper-split-export-declaration": 
"^7.18.6", + "@babel/parser": "^7.22.4", + "@babel/types": "^7.22.4", + "debug": "^4.1.0", + "globals": "^11.1.0" }, "engines": { - "node": ">=6.0.0" + "node": ">=6.9.0" } }, - "node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "node_modules/@babel/traverse/node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", "dev": true, "engines": { - "node": ">=10" + "node": ">=4" + } + }, + "node_modules/@babel/types": { + "version": "7.22.4", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.4.tgz", + "integrity": "sha512-Tx9x3UBHTTsMSW85WB2kphxYQVvrZ/t1FxD88IpSgIjiUJlCm9z+xWIDwyo1vffTwSqteqyznB8ZE9vYYk16zA==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.21.5", + "@babel/helper-validator-identifier": "^7.19.1", + "to-fast-properties": "^2.0.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "engines": { + "node": ">=6.9.0" } }, - "node_modules/eslint": { - "version": "8.36.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.36.0.tgz", - "integrity": "sha512-Y956lmS7vDqomxlaaQAHVmeb4tNMp2FWIvU/RnU5BD3IKMD/MJPr76xdyr68P8tV1iNMvN2mRK0yy3c+UjL+bw==", + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + 
"integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", "dev": true, "dependencies": { - "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.4.0", - "@eslint/eslintrc": "^2.0.1", - "@eslint/js": "8.36.0", - "@humanwhocodes/config-array": "^0.11.8", - "@humanwhocodes/module-importer": "^1.0.1", - "@nodelib/fs.walk": "^1.2.8", - "ajv": "^6.10.0", - "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", - "debug": "^4.3.2", - "doctrine": "^3.0.0", - "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.1.1", - "eslint-visitor-keys": "^3.3.0", - "espree": "^9.5.0", - "esquery": "^1.4.2", - "esutils": "^2.0.2", - "fast-deep-equal": "^3.1.3", - "file-entry-cache": "^6.0.1", - "find-up": "^5.0.0", - "glob-parent": "^6.0.2", - "globals": "^13.19.0", - "grapheme-splitter": "^1.0.4", - "ignore": "^5.2.0", - "import-fresh": "^3.0.0", - "imurmurhash": "^0.1.4", - "is-glob": "^4.0.0", - "is-path-inside": "^3.0.3", - "js-sdsl": "^4.1.4", - "js-yaml": "^4.1.0", - "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", - "lodash.merge": "^4.6.2", - "minimatch": "^3.1.2", - "natural-compare": "^1.4.0", - "optionator": "^0.9.1", - "strip-ansi": "^6.0.1", - "strip-json-comments": "^3.1.0", - "text-table": "^0.2.0" - }, - "bin": { - "eslint": "bin/eslint.js" + "@jridgewell/trace-mapping": "0.3.9" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" + "node": ">=12" } }, - "node_modules/eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + 
"integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", "dev": true, "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/eslint-visitor-keys": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.3.0.tgz", - "integrity": "sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==", - "dev": true, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" } }, - "node_modules/eslint/node_modules/eslint-scope": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.1.1.tgz", - "integrity": "sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==", + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", "dev": true, "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^5.2.0" + "eslint-visitor-keys": "^3.3.0" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, - "node_modules/eslint/node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "node_modules/@eslint-community/regexpp": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.4.1.tgz", + "integrity": 
"sha512-BISJ6ZE4xQsuL/FmsyRaiffpq977bMlsKfGHTQrOGFErfByxIe6iZTxPf/00Zon9b9a7iUykfQwejN3s2ZW/Bw==", "dev": true, "engines": { - "node": ">=4.0" + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" } }, - "node_modules/espree": { - "version": "9.5.0", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.5.0.tgz", - "integrity": "sha512-JPbJGhKc47++oo4JkEoTe2wjy4fmMwvFpgJT9cQzmfXKp22Dr6Hf1tdCteLz1h0P3t+mGvWZ+4Uankvh8+c6zw==", + "node_modules/@eslint/eslintrc": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.0.1.tgz", + "integrity": "sha512-eFRmABvW2E5Ho6f5fHLqgena46rOj7r7OKHYfLElqcBfGFHHpjBhivyi5+jOEQuSpdc/1phIZJlbC2te+tZNIw==", "dev": true, "dependencies": { - "acorn": "^8.8.0", - "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.3.0" + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.5.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -734,983 +705,4871 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/esquery": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", - "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "node_modules/@eslint/js": { + "version": "8.36.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.36.0.tgz", + "integrity": "sha512-lxJ9R5ygVm8ZWgYdUweoq5ownDlJ4upvoWmO4eLxBYHdMo+vZ/Rx0EN6MbKWDJOSUGrqJy2Gt+Dyv/VKml0fjg==", "dev": true, - "dependencies": { - "estraverse": "^5.1.0" - }, "engines": { - "node": ">=0.10" + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, - "node_modules/esquery/node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": 
"sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.8", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz", + "integrity": "sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==", "dev": true, + "dependencies": { + "@humanwhocodes/object-schema": "^1.2.1", + "debug": "^4.1.1", + "minimatch": "^3.0.5" + }, "engines": { - "node": ">=4.0" + "node": ">=10.10.0" } }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", "dev": true, - "dependencies": { - "estraverse": "^5.2.0" - }, "engines": { - "node": ">=4.0" + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" } }, - "node_modules/esrecurse/node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "node_modules/@humanwhocodes/object-schema": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", + "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", + "dev": true + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", "dev": true, + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, "engines": { - "node": ">=4.0" + "node": ">=8" } }, - "node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dev": true, - "engines": { - "node": ">=4.0" + "dependencies": { + "sprintf-js": "~1.0.2" } }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", "dev": true, + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, "engines": { - "node": ">=0.10.0" + "node": ">=4" } }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true - }, - 
"node_modules/fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", "dev": true, "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" }, "engines": { - "node": ">=8.6.0" + "node": ">=8" } }, - "node_modules/fast-glob/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", "dev": true, "dependencies": { - "is-glob": "^4.0.1" + "argparse": "^1.0.7", + "esprima": "^4.0.0" }, - "engines": { - "node": ">= 6" + "bin": { + "js-yaml": "bin/js-yaml.js" } }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true - }, - "node_modules/fast-levenshtein": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - 
"integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", - "dev": true - }, - "node_modules/fastq": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", - "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", "dev": true, "dependencies": { - "reusify": "^1.0.4" + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" } }, - "node_modules/file-entry-cache": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", - "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "dev": true, "dependencies": { - "flat-cache": "^3.0.4" + "p-try": "^2.0.0" }, "engines": { - "node": "^10.12.0 || >=12.0.0" + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": 
"sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", "dev": true, "dependencies": { - "to-regex-range": "^5.0.1" + "p-limit": "^2.2.0" }, "engines": { "node": ">=8" } }, - "node_modules/find-up": { + "node_modules/@istanbuljs/load-nyc-config/node_modules/resolve-from": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", "dev": true, - "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=8" } }, - "node_modules/flat-cache": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", - "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.5.0.tgz", + "integrity": "sha512-NEpkObxPwyw/XxZVLPmAGKE89IQRp4puc6IQRPru6JKd1M3fW9v1xM1AnzIJE65hbCkzQAdnL8P47e9hzhiYLQ==", "dev": true, "dependencies": { - "flatted": "^3.1.0", - "rimraf": "^3.0.2" + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.5.0", + "jest-util": "^29.5.0", + "slash": "^3.0.0" }, "engines": { - "node": 
"^10.12.0 || >=12.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/flatted": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz", - "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", - "dev": true - }, - "node_modules/follow-redirects": { - "version": "1.15.2", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", - "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], + "node_modules/@jest/core": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.5.0.tgz", + "integrity": "sha512-28UzQc7ulUrOQw1IsN/kv1QES3q2kkbl/wGslyhAclqZ/8cMdB5M68BffkIdSJgKBUt50d3hbwJ92XESlE7LiQ==", + "dev": true, + "dependencies": { + "@jest/console": "^29.5.0", + "@jest/reporters": "^29.5.0", + "@jest/test-result": "^29.5.0", + "@jest/transform": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.5.0", + "jest-config": "^29.5.0", + "jest-haste-map": "^29.5.0", + "jest-message-util": "^29.5.0", + "jest-regex-util": "^29.4.3", + "jest-resolve": "^29.5.0", + "jest-resolve-dependencies": "^29.5.0", + "jest-runner": "^29.5.0", + "jest-runtime": "^29.5.0", + "jest-snapshot": "^29.5.0", + "jest-util": "^29.5.0", + "jest-validate": "^29.5.0", + "jest-watcher": "^29.5.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.5.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, "engines": { - "node": ">=4.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" }, "peerDependenciesMeta": { - "debug": { + "node-notifier": { 
"optional": true } } }, - "node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "node_modules/@jest/environment": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.5.0.tgz", + "integrity": "sha512-5FXw2+wD29YU1d4I2htpRX7jYnAyTRjP2CsXQdo9SAM8g3ifxWPSV0HnClSn71xwctr0U3oZIIH+dtbfmnbXVQ==", + "dev": true, "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" + "@jest/fake-timers": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "jest-mock": "^29.5.0" }, "engines": { - "node": ">= 6" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "dev": true - }, - "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "node_modules/@jest/expect": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.5.0.tgz", + "integrity": "sha512-PueDR2HGihN3ciUNGr4uelropW7rqUfTiOn+8u0leg/42UhblPxHkfoh0Ruu3I9Y1962P3u2DY4+h7GVTSVU6g==", "dev": true, "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "expect": "^29.5.0", + "jest-snapshot": "^29.5.0" }, "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/glob-parent": { - "version": 
"6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "node_modules/@jest/expect-utils": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.5.0.tgz", + "integrity": "sha512-fmKzsidoXQT2KwnrwE0SQq3uj8Z763vzR8LnLBwC2qYWEFpjX8daRsk6rHUM1QvNlEW/UJXNXm59ztmJJWs2Mg==", "dev": true, "dependencies": { - "is-glob": "^4.0.3" + "jest-get-type": "^29.4.3" }, "engines": { - "node": ">=10.13.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", + "node_modules/@jest/fake-timers": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.5.0.tgz", + "integrity": "sha512-9ARvuAAQcBwDAqOnglWq2zwNIRUDtk/SCkp/ToGEhFv5r86K21l+VEs0qNTaXtyiY0lEePl3kylijSYJQqdbDg==", "dev": true, "dependencies": { - "type-fest": "^0.20.2" + "@jest/types": "^29.5.0", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.5.0", + "jest-mock": "^29.5.0", + "jest-util": "^29.5.0" }, "engines": { - "node": ">=8" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.5.0.tgz", + "integrity": "sha512-S02y0qMWGihdzNbUiqSAiKSpSozSuHX5UYc7QbnHP+D9Lyw8DgGGCinrN9uSuHPeKgSSzvPom2q1nAtBvUsvPQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.5.0", + "@jest/expect": "^29.5.0", + "@jest/types": "^29.5.0", + "jest-mock": "^29.5.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - 
"node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "node_modules/@jest/reporters": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.5.0.tgz", + "integrity": "sha512-D05STXqj/M8bP9hQNSICtPqz97u7ffGzZu+9XLucXhkOFBqKcXe04JLZOgIekOxdb73MAoBUFnqvf7MCpKk5OA==", "dev": true, "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.5.0", + "@jest/test-result": "^29.5.0", + "@jest/transform": "^29.5.0", + "@jest/types": "^29.5.0", + "@jridgewell/trace-mapping": "^0.3.15", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^5.1.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.5.0", + "jest-util": "^29.5.0", + "jest-worker": "^29.5.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" }, "engines": { - "node": ">=10" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } } }, - "node_modules/grapheme-splitter": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/grapheme-splitter/-/grapheme-splitter-1.0.4.tgz", - "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", - "dev": true - }, - "node_modules/has-flag": { - 
"version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "node_modules/@jest/schemas": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.4.3.tgz", + "integrity": "sha512-VLYKXQmtmuEz6IxJsrZwzG9NvtkQsWNnWMsKxqWNu3+CnfzJQhp0WDDKWLVV9hLKr0l3SLLFRqcYHjhtyuDVxg==", "dev": true, + "dependencies": { + "@sinclair/typebox": "^0.25.16" + }, "engines": { - "node": ">=8" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/ignore": { - "version": "5.2.4", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", - "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", + "node_modules/@jest/source-map": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.4.3.tgz", + "integrity": "sha512-qyt/mb6rLyd9j1jUts4EQncvS6Yy3PM9HghnNv86QBlV+zdL2inCdK1tuVlL+J+lpiw2BI67qXOrX3UurBqQ1w==", "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.15", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, "engines": { - "node": ">= 4" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "node_modules/@jest/test-result": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.5.0.tgz", + "integrity": "sha512-fGl4rfitnbfLsrfx1uUpDEESS7zM8JdgZgOCQuxQvL1Sn/I6ijeAVQWGfXI9zb1i9Mzo495cIpVZhA0yr60PkQ==", "dev": true, "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" + "@jest/console": "^29.5.0", + "@jest/types": "^29.5.0", + 
"@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" }, "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "node_modules/@jest/test-sequencer": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.5.0.tgz", + "integrity": "sha512-yPafQEcKjkSfDXyvtgiV4pevSeyuA6MQr6ZIdVkWJly9vkqjnFfcfhRQqpD5whjoU8EORki752xQmjaqoFjzMQ==", "dev": true, + "dependencies": { + "@jest/test-result": "^29.5.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.5.0", + "slash": "^3.0.0" + }, "engines": { - "node": ">=0.8.19" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "node_modules/@jest/transform": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.5.0.tgz", + "integrity": "sha512-8vbeZWqLJOvHaDfeMuoHITGKSz5qWc9u04lnWrQE3VyuSw604PzQM824ZeX9XSjUCeDiE3GuxZe5UKa8J61NQw==", "dev": true, "dependencies": { - "once": "^1.3.0", - "wrappy": "1" + "@babel/core": "^7.11.6", + "@jest/types": "^29.5.0", + "@jridgewell/trace-mapping": "^0.3.15", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.5.0", + "jest-regex-util": "^29.4.3", + "jest-util": "^29.5.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, 
+ "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "node_modules/@jest/types": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.5.0.tgz", + "integrity": "sha512-qbu7kN6czmVRc3xWFQcAN03RAUamgppVUdXrvl1Wr3jlNF93o9mJbGcDWrwGB6ht44u7efB1qCFgVQmca24Uog==", "dev": true, + "dependencies": { + "@jest/schemas": "^29.4.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, "engines": { - "node": ">=0.10.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", "dev": true, "dependencies": { - "is-extglob": "^2.1.1" + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" }, "engines": { - "node": ">=0.10.0" + "node": ">=6.0.0" } }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - 
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", + "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", "dev": true, "engines": { - "node": ">=0.12.0" + "node": ">=6.0.0" } }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "node_modules/@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", "dev": true, "engines": { - "node": ">=8" + "node": ">=6.0.0" } }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", "dev": true }, - "node_modules/js-sdsl": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/js-sdsl/-/js-sdsl-4.4.0.tgz", - "integrity": "sha512-FfVSdx6pJ41Oa+CF7RDaFmTnCaFhua+SNYQX74riGOpl96x+2jQCqEfQ2bnXu/5DPCqlRuiqyvTJM0Qjz26IVg==", - "dev": true, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/js-sdsl" - } - }, - "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": 
"https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.18", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz", + "integrity": "sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==", "dev": true, "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" + "@jridgewell/resolve-uri": "3.1.0", + "@jridgewell/sourcemap-codec": "1.4.14" } }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "node_modules/json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "node_modules/@jridgewell/trace-mapping/node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.14", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", + "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==", "dev": true }, - "node_modules/levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": 
"sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", "dev": true, "dependencies": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" }, "engines": { - "node": ">= 0.8.0" + "node": ">= 8" } }, - "node_modules/locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", "dev": true, - "dependencies": { - "p-locate": "^5.0.0" - }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">= 8" } }, - "node_modules/lodash.merge": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.25.24", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz", + "integrity": "sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ==", + "dev": true + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.0.tgz", + "integrity": "sha512-jXBtWAF4vmdNmZgD5FoKsVLv3rPgDnLgPbU84LIJ3otV44vJlDRokVng5v8NFJdCf/da9legHcKaRuZs4L7faA==", + "dev": true, + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.2.0.tgz", + "integrity": "sha512-OPwQlEdg40HAj5KNF8WW6q2KG4Z+cBCZb3m4ninfTZKaBmbIJodviQsDBoYMPHkOyJJMHnOJo5j2+LKDOhOACg==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.9.tgz", + "integrity": "sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==", + "dev": true + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true + }, + "node_modules/@types/babel__core": { + "version": "7.20.1", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.1.tgz", + "integrity": "sha512-aACu/U/omhdk15O4Nfb+fHgH/z3QsfQzpnvRZhYhThms83ZnAOZz7zZAWO7mn2yyNQaA4xTO8GLK3uqFU4bYYw==", + "dev": true, + "dependencies": { + 
"@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.6.4", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.4.tgz", + "integrity": "sha512-tFkciB9j2K755yrTALxD44McOrk+gfpIpvC3sxHjRawj6PfnQxrse4Clq5y/Rq+G3mrBurMax/lG8Qn2t9mSsg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.1.tgz", + "integrity": "sha512-azBFKemX6kMg5Io+/rdGT0dkGreboUVR0Cdm3fz9QJWpaQGJRQXl7C+6hOTCZcMll7KFyEQpgbYI2lHdsS4U7g==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.20.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.0.tgz", + "integrity": "sha512-TBOjqAGf0hmaqRwpii5LLkJLg7c6OMm4nHLmpsUxwk9bBHtoTC6dAHdVWdGv4TBxj2CZOZY8Xfq8WmfoVi7n4Q==", + "dev": true, + "dependencies": { + "@babel/types": "^7.20.7" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.6.tgz", + "integrity": "sha512-Sig0SNORX9fdW+bQuTEovKj3uHcUL6LQKbCrrqb1X7J6/ReAbhCXRAhc+SMejhLELFj2QcyuxmUooZ4bt5ReSw==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", + "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==", + "dev": true + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", + "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz", + "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.11", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", + "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", + "dev": true + }, + "node_modules/@types/jsonpath": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@types/jsonpath/-/jsonpath-0.2.0.tgz", + "integrity": "sha512-v7qlPA0VpKUlEdhghbDqRoKMxFB3h3Ch688TApBJ6v+XLDdvWCGLJIYiPKGZnS6MAOie+IorCfNYVHOPIHSWwQ==", + "dev": true + }, + "node_modules/@types/node": { + "version": "18.11.9", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.9.tgz", + "integrity": "sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg==", + "dev": true + }, + "node_modules/@types/prettier": { + "version": "2.7.3", + "resolved": "https://registry.npmjs.org/@types/prettier/-/prettier-2.7.3.tgz", + "integrity": "sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA==", + "dev": true + }, + "node_modules/@types/semver": { + "version": "7.3.13", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.3.13.tgz", + "integrity": "sha512-21cFJr9z3g5dW8B0CVI9g2O9beqaThGQ6ZFBqHfwhzLDKUxaqTIy3vnfah/UPkfOiF2pLq+tGz+W8RyCskuslw==", + "dev": true + 
}, + "node_modules/@types/stack-utils": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.1.tgz", + "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==", + "dev": true + }, + "node_modules/@types/yargs": { + "version": "17.0.24", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz", + "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==", + "dev": true, + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.0", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", + "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==", + "dev": true + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "5.56.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.56.0.tgz", + "integrity": "sha512-ZNW37Ccl3oMZkzxrYDUX4o7cnuPgU+YrcaYXzsRtLB16I1FR5SHMqga3zGsaSliZADCWo2v8qHWqAYIj8nWCCg==", + "dev": true, + "dependencies": { + "@eslint-community/regexpp": "^4.4.0", + "@typescript-eslint/scope-manager": "5.56.0", + "@typescript-eslint/type-utils": "5.56.0", + "@typescript-eslint/utils": "5.56.0", + "debug": "^4.3.4", + "grapheme-splitter": "^1.0.4", + "ignore": "^5.2.0", + "natural-compare-lite": "^1.4.0", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^5.0.0", + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "5.56.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.56.0.tgz", + "integrity": "sha512-sn1OZmBxUsgxMmR8a8U5QM/Wl+tyqlH//jTqCg8daTAmhAk26L2PFhcqPLlYBhYUJMZJK276qLXlHN3a83o2cg==", + "dev": true, + "dependencies": { + "@typescript-eslint/scope-manager": "5.56.0", + "@typescript-eslint/types": "5.56.0", + "@typescript-eslint/typescript-estree": "5.56.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "5.56.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.56.0.tgz", + "integrity": "sha512-jGYKyt+iBakD0SA5Ww8vFqGpoV2asSjwt60Gl6YcO8ksQ8s2HlUEyHBMSa38bdLopYqGf7EYQMUIGdT/Luw+sw==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.56.0", + "@typescript-eslint/visitor-keys": "5.56.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "5.56.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.56.0.tgz", + "integrity": "sha512-8WxgOgJjWRy6m4xg9KoSHPzBNZeQbGlQOH7l2QEhQID/+YseaFxg5J/DLwWSsi9Axj4e/cCiKx7PVzOq38tY4A==", + "dev": true, + "dependencies": { + "@typescript-eslint/typescript-estree": "5.56.0", + "@typescript-eslint/utils": "5.56.0", + "debug": "^4.3.4", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "*" + }, + 
"peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + "version": "5.56.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.56.0.tgz", + "integrity": "sha512-JyAzbTJcIyhuUhogmiu+t79AkdnqgPUEsxMTMc/dCZczGMJQh1MK2wgrju++yMN6AWroVAy2jxyPcPr3SWCq5w==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "5.56.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.56.0.tgz", + "integrity": "sha512-41CH/GncsLXOJi0jb74SnC7jVPWeVJ0pxQj8bOjH1h2O26jXN3YHKDT1ejkVz5YeTEQPeLCCRY0U2r68tfNOcg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.56.0", + "@typescript-eslint/visitor-keys": "5.56.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.3.7", + "tsutils": "^3.21.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "5.56.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.56.0.tgz", + "integrity": "sha512-XhZDVdLnUJNtbzaJeDSCIYaM+Tgr59gZGbFuELgF7m0IY03PlciidS7UQNKLE0+WpUTn1GlycEr6Ivb/afjbhA==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@types/json-schema": "^7.0.9", + "@types/semver": "^7.3.12", + "@typescript-eslint/scope-manager": "5.56.0", + "@typescript-eslint/types": "5.56.0", + "@typescript-eslint/typescript-estree": "5.56.0", + "eslint-scope": "^5.1.1", + "semver": "^7.3.7" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, 
+ "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "5.56.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.56.0.tgz", + "integrity": "sha512-1mFdED7u5bZpX6Xxf5N9U2c18sb+8EvU3tyOIj6LQZ5OOvnmj8BVeNNP603OFPm5KkS1a7IvCIcwrdHXaEMG/Q==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "5.56.0", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/acorn": { + "version": "8.8.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", + "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "dev": true, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": 
"^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": 
"sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/axios": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.1.3.tgz", + "integrity": "sha512-00tXVRwKx/FZr/IDVFt4C+f9FYairX517WoGCL6dpOntqLkZofjhu43F/Xl44UOpqa+9sLFDrG/XAnFsUYgkDA==", + "dependencies": { + "follow-redirects": "^1.15.0", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/babel-jest": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.5.0.tgz", + "integrity": "sha512-mA4eCDh5mSo2EcA9xQjVTpmbbNk32Zb3Q3QFQsNhaK56Q+yoXowzFodLux30HRgyOho5rsQ6B0P9QpMkvvnJ0Q==", + "dev": true, + "dependencies": { + "@jest/transform": "^29.5.0", + "@types/babel__core": "^7.1.14", + 
"babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.5.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.5.0.tgz", + "integrity": "sha512-zSuuuAlTMT4mzLj2nPnUm6fsE6270vdOfnpbJ+RmruU75UhLFvL0N2NgI7xpeS7NaB6hGqmd5pVpGTDYvi4Q3w==", + "dev": true, + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz", + "integrity": "sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==", + "dev": true, + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.8.3", + "@babel/plugin-syntax-import-meta": "^7.8.3", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3", + 
"@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.8.3", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-top-level-await": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.5.0.tgz", + "integrity": "sha512-JOMloxOqdiBSxMAzjRaH023/vvcaSaec49zvg+2LmNsktC7ei39LTJGw02J+9uUtTZUq6xbLyJ4dxe9sSmIuAg==", + "dev": true, + "dependencies": { + "babel-plugin-jest-hoist": "^29.5.0", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.21.7", + "resolved": 
"https://registry.npmjs.org/browserslist/-/browserslist-4.21.7.tgz", + "integrity": "sha512-BauCXrQ7I2ftSqd2mvKHGo85XR0u7Ru3C/Hxsy/0TkfCtjrmAbPdzLGasmoiBxplpDXlPvdjX9u7srIMfgasNA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001489", + "electron-to-chromium": "^1.4.411", + "node-releases": "^2.0.12", + "update-browserslist-db": "^1.0.11" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bs-logger": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", + "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", + "dev": true, + "dependencies": { + "fast-json-stable-stringify": "2.x" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + 
"node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001492", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001492.tgz", + "integrity": "sha512-2efF8SAZwgAX1FJr87KWhvuJxnGJKOnctQa8xLOskAXNXq8oiuqgl6u1kk3fFpsp3GgvzlRjiK1sl63hNtFADw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/ci-info": { + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", + "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + 
"node_modules/cjs-module-lexer": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.2.tgz", + "integrity": "sha512-cOU9usZw8/dXIXKtwa8pM0OTJQuJkxMN6w30csNRUerHfeQ5R6U3kkU/FtJeIf3M202OHfY2U8ccInBG7/xogA==", + "dev": true + }, + "node_modules/class-transformer": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/class-transformer/-/class-transformer-0.5.1.tgz", + "integrity": "sha512-SQa1Ws6hUbfC98vKGxZH3KFY0Y1lm5Zm0SY8XX9zbK7FJCyVEac3ATW0RIpwzW+oOfmHE5PMPufDG9hCfoEOMw==" + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.1.tgz", + "integrity": "sha512-iBPtljfCNcTKNAto0KEtDfZ3qzjJvqE3aTGZsbhjSBlorqpXJlaWWtPO35D+ZImoC3KWejX64o+yPGxhWSTzfg==", + "dev": true + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": 
"sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/dedent": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-0.7.0.tgz", + "integrity": "sha512-Q6fKUPqnAHAyhiUgFU7BUzLiv0kd8saH9al7tnu5Q/okj6dnupxyTgFIBjVzJATdfIAm9NAsvXNzjaKa+bxVyA==", + "dev": true + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==" + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, + 
"node_modules/diff-sequences": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.4.3.tgz", + "integrity": "sha512-ofrBgwpPhCD85kMKtE9RYFFq6OC1A89oW2vvgWZNCwxrUpRUILopY7lsYyMDSjc8g6U6aiO0Qubg6r4Wgt5ZnA==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.4.414", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.414.tgz", + "integrity": "sha512-RRuCvP6ekngVh2SAJaOKT/hxqc9JAsK+Pe0hP5tGQIfonU2Zy9gMGdJ+mBdyl/vNucMG6gkXYtuM4H/1giws5w==", + "dev": true + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/error-ex": 
{ + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/escodegen": { + "version": "1.14.3", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.14.3.tgz", + "integrity": "sha512-qFcX0XJkdg+PB3xjZZG/wKSuT1PnQWx57+TVSjIMmILd2yC/6ByYElPwJnslDsuWuSAp4AwJGumarAAmJch5Kw==", + "dependencies": { + "esprima": "^4.0.1", + "estraverse": "^4.2.0", + "esutils": "^2.0.2", + "optionator": "^0.8.1" + }, + "bin": { + "escodegen": "bin/escodegen.js", + "esgenerate": "bin/esgenerate.js" + }, + "engines": { + "node": ">=4.0" + }, + "optionalDependencies": { + "source-map": "~0.6.1" + } + }, + "node_modules/escodegen/node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/escodegen/node_modules/levn": { + "version": "0.3.0", + "resolved": 
"https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==", + "dependencies": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/escodegen/node_modules/optionator": { + "version": "0.8.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", + "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "dependencies": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/escodegen/node_modules/prelude-ls": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/escodegen/node_modules/type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==", + "dependencies": { + "prelude-ls": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/eslint": { + "version": "8.36.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.36.0.tgz", + "integrity": "sha512-Y956lmS7vDqomxlaaQAHVmeb4tNMp2FWIvU/RnU5BD3IKMD/MJPr76xdyr68P8tV1iNMvN2mRK0yy3c+UjL+bw==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.4.0", + "@eslint/eslintrc": "^2.0.1", + "@eslint/js": "8.36.0", + "@humanwhocodes/config-array": "^0.11.8", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "ajv": 
"^6.10.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.1.1", + "eslint-visitor-keys": "^3.3.0", + "espree": "^9.5.0", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "grapheme-splitter": "^1.0.4", + "ignore": "^5.2.0", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-sdsl": "^4.1.4", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.1", + "strip-ansi": "^6.0.1", + "strip-json-comments": "^3.1.0", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.3.0.tgz", + "integrity": "sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/eslint/node_modules/eslint-scope": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.1.1.tgz", + "integrity": 
"sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/eslint/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/espree": { + "version": "9.5.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.5.0.tgz", + "integrity": "sha512-JPbJGhKc47++oo4JkEoTe2wjy4fmMwvFpgJT9cQzmfXKp22Dr6Hf1tdCteLz1h0P3t+mGvWZ+4Uankvh8+c6zw==", + "dev": true, + "dependencies": { + "acorn": "^8.8.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-1.2.2.tgz", + "integrity": "sha512-+JpPZam9w5DuJ3Q67SqsMGtiHKENSMRVoxvArfJZK01/BfLEObtZ6orJa/MtoGNR/rfMgp5837T41PAmTwAv/A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/esquery": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esquery/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": 
"sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": 
"https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.5.0.tgz", + "integrity": "sha512-yM7xqUrCO2JdpFo4XpM82t+PJBFybdqoQuJLDGeDX2ij8NZzqRHyu3Hp188/JX7SWqud+7t4MUdvcgGBICMHZg==", + "dev": true, + "dependencies": { + "@jest/expect-utils": "^29.5.0", + "jest-get-type": "^29.4.3", + "jest-matcher-utils": "^29.5.0", + "jest-message-util": "^29.5.0", + "jest-util": "^29.5.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "node_modules/fast-glob": { + "version": "3.2.12", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", + "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + 
"node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==" + }, + "node_modules/fastq": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", + "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": 
"https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", + "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", + "dev": true, + "dependencies": { + "flatted": "^3.1.0", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz", + "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", + "dev": true + }, + "node_modules/follow-redirects": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", + "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "dev": true + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": 
"sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "13.20.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", + "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true + }, + "node_modules/grapheme-splitter": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/grapheme-splitter/-/grapheme-splitter-1.0.4.tgz", + "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", + "dev": true + }, + "node_modules/has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dev": true, + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/ignore": { + "version": "5.2.4", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", + "integrity": 
"sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-local": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", + "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", + "dev": true, + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + 
"dev": true + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true + }, + "node_modules/is-core-module": { + "version": "2.12.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz", + "integrity": "sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==", + "dev": true, + "dependencies": { + "has": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz", + "integrity": "sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + 
"istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", + "integrity": "sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==", + "dev": true, + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^3.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.5.tgz", + "integrity": "sha512-nUsEMa9pBt/NOHqbcbeJEgqIlY/K7rVWUX6Lql2orY5e9roQOthbR3vtY4zzf2orPELg80fnxxk9zUyPlgwD1w==", + "dev": true, + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.5.0.tgz", + "integrity": "sha512-juMg3he2uru1QoXX078zTa7pO85QyB9xajZc6bU+d9yEGwrKX6+vGmJQ3UdVZsvTEUARIdObzH68QItim6OSSQ==", + "dev": true, + "dependencies": { + 
"@jest/core": "^29.5.0", + "@jest/types": "^29.5.0", + "import-local": "^3.0.2", + "jest-cli": "^29.5.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.5.0.tgz", + "integrity": "sha512-IFG34IUMUaNBIxjQXF/iu7g6EcdMrGRRxaUSw92I/2g2YC6vCdTltl4nHvt7Ci5nSJwXIkCu8Ka1DKF+X7Z1Ag==", + "dev": true, + "dependencies": { + "execa": "^5.0.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.5.0.tgz", + "integrity": "sha512-gq/ongqeQKAplVxqJmbeUOJJKkW3dDNPY8PjhJ5G0lBRvu0e3EWGxGy5cI4LAGA7gV2UHCtWBI4EMXK8c9nQKA==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.5.0", + "@jest/expect": "^29.5.0", + "@jest/test-result": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^0.7.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.5.0", + "jest-matcher-utils": "^29.5.0", + "jest-message-util": "^29.5.0", + "jest-runtime": "^29.5.0", + "jest-snapshot": "^29.5.0", + "jest-util": "^29.5.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.5.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.5.0.tgz", + "integrity": "sha512-L1KcP1l4HtfwdxXNFCL5bmUbLQiKrakMUriBEcc1Vfz6gx31ORKdreuWvmQVBit+1ss9NNR3yxjwfwzZNdQXJw==", + "dev": true, + "dependencies": { + "@jest/core": 
"^29.5.0", + "@jest/test-result": "^29.5.0", + "@jest/types": "^29.5.0", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "import-local": "^3.0.2", + "jest-config": "^29.5.0", + "jest-util": "^29.5.0", + "jest-validate": "^29.5.0", + "prompts": "^2.0.1", + "yargs": "^17.3.1" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-config": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.5.0.tgz", + "integrity": "sha512-kvDUKBnNJPNBmFFOhDbm59iu1Fii1Q6SxyhXfvylq3UTHbg6o7j/g8k2dZyXWLvfdKB1vAPxNZnMgtKJcmu3kA==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.5.0", + "@jest/types": "^29.5.0", + "babel-jest": "^29.5.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.5.0", + "jest-environment-node": "^29.5.0", + "jest-get-type": "^29.4.3", + "jest-regex-util": "^29.4.3", + "jest-resolve": "^29.5.0", + "jest-runner": "^29.5.0", + "jest-util": "^29.5.0", + "jest-validate": "^29.5.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.5.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-diff": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.5.0.tgz", + "integrity": "sha512-LtxijLLZBduXnHSniy0WMdaHjmQnt3g5sa16W4p0HqukYTTsyTW3GD1q41TyGl5YFXj/5B2U6dlh5FM1LIMgxw==", + "dev": true, + 
"dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.4.3", + "jest-get-type": "^29.4.3", + "pretty-format": "^29.5.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.4.3.tgz", + "integrity": "sha512-fzdTftThczeSD9nZ3fzA/4KkHtnmllawWrXO69vtI+L9WjEIuXWs4AmyME7lN5hU7dB0sHhuPfcKofRsUb/2Fg==", + "dev": true, + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.5.0.tgz", + "integrity": "sha512-HM5kIJ1BTnVt+DQZ2ALp3rzXEl+g726csObrW/jpEGl+CDSSQpOJJX2KE/vEg8cxcMXdyEPu6U4QX5eruQv5hA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.5.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.4.3", + "jest-util": "^29.5.0", + "pretty-format": "^29.5.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.5.0.tgz", + "integrity": "sha512-ExxuIK/+yQ+6PRGaHkKewYtg6hto2uGCgvKdb2nfJfKXgZ17DfXjvbZ+jA1Qt9A8EQSfPnt5FKIfnOO3u1h9qw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.5.0", + "@jest/fake-timers": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "jest-mock": "^29.5.0", + "jest-util": "^29.5.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.4.3.tgz", + "integrity": "sha512-J5Xez4nRRMjk8emnTpWrlkyb9pfRQQanDrvWHhsR1+VUfbwxi30eVcZFlcdGInRibU4G5LwHXpI7IRHU0CY+gg==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + 
"node_modules/jest-haste-map": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.5.0.tgz", + "integrity": "sha512-IspOPnnBro8YfVYSw6yDRKh/TiCdRngjxeacCps1cQ9cgVN6+10JUcuJ1EabrgYLOATsIAigxA0rLR9x/YlrSA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.5.0", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.4.3", + "jest-util": "^29.5.0", + "jest-worker": "^29.5.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.5.0.tgz", + "integrity": "sha512-u9YdeeVnghBUtpN5mVxjID7KbkKE1QU4f6uUwuxiY0vYRi9BUCLKlPEZfDGR67ofdFmDz9oPAy2G92Ujrntmow==", + "dev": true, + "dependencies": { + "jest-get-type": "^29.4.3", + "pretty-format": "^29.5.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.5.0.tgz", + "integrity": "sha512-lecRtgm/rjIK0CQ7LPQwzCs2VwW6WAahA55YBuI+xqmhm7LAaxokSB8C97yJeYyT+HvQkH741StzpU41wohhWw==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.5.0", + "jest-get-type": "^29.4.3", + "pretty-format": "^29.5.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.5.0.tgz", + "integrity": "sha512-Kijeg9Dag6CKtIDA7O21zNTACqD5MD/8HfIV8pdD94vFyFuer52SigdC3IQMhab3vACxXMiFk+yMHNdbqtyTGA==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": 
"^29.5.0", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.5.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.5.0.tgz", + "integrity": "sha512-GqOzvdWDE4fAV2bWQLQCkujxYWL7RxjCnj71b5VhDAGOevB3qj3Ovg26A5NI84ZpODxyzaozXLOh2NCgkbvyaw==", + "dev": true, + "dependencies": { + "@jest/types": "^29.5.0", + "@types/node": "*", + "jest-util": "^29.5.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.4.3.tgz", + "integrity": "sha512-O4FglZaMmWXbGHSQInfXewIsd1LMn9p3ZXB/6r4FOkyhX2/iP/soMG98jGvk/A3HAN78+5VWcBGO0BJAPRh4kg==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.5.0.tgz", + "integrity": "sha512-1TzxJ37FQq7J10jPtQjcc+MkCkE3GBpBecsSUWJ0qZNJpmg6m0D9/7II03yJulm3H/fvVjgqLh/k2eYg+ui52w==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.5.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.5.0", + "jest-validate": "^29.5.0", + "resolve": "^1.20.0", + "resolve.exports": 
"^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.5.0.tgz", + "integrity": "sha512-sjV3GFr0hDJMBpYeUuGduP+YeCRbd7S/ck6IvL3kQ9cpySYKqcqhdLLC2rFwrcL7tz5vYibomBrsFYWkIGGjOg==", + "dev": true, + "dependencies": { + "jest-regex-util": "^29.4.3", + "jest-snapshot": "^29.5.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.5.0.tgz", + "integrity": "sha512-m7b6ypERhFghJsslMLhydaXBiLf7+jXy8FwGRHO3BGV1mcQpPbwiqiKUR2zU2NJuNeMenJmlFZCsIqzJCTeGLQ==", + "dev": true, + "dependencies": { + "@jest/console": "^29.5.0", + "@jest/environment": "^29.5.0", + "@jest/test-result": "^29.5.0", + "@jest/transform": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.4.3", + "jest-environment-node": "^29.5.0", + "jest-haste-map": "^29.5.0", + "jest-leak-detector": "^29.5.0", + "jest-message-util": "^29.5.0", + "jest-resolve": "^29.5.0", + "jest-runtime": "^29.5.0", + "jest-util": "^29.5.0", + "jest-watcher": "^29.5.0", + "jest-worker": "^29.5.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.5.0.tgz", + "integrity": "sha512-1Hr6Hh7bAgXQP+pln3homOiEZtCDZFqwmle7Ew2j8OlbkIu6uE3Y/etJQG8MLQs3Zy90xrp2C0BRrtPHG4zryw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.5.0", + "@jest/fake-timers": "^29.5.0", + "@jest/globals": "^29.5.0", + "@jest/source-map": "^29.4.3", + "@jest/test-result": "^29.5.0", + 
"@jest/transform": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.5.0", + "jest-message-util": "^29.5.0", + "jest-mock": "^29.5.0", + "jest-regex-util": "^29.4.3", + "jest-resolve": "^29.5.0", + "jest-snapshot": "^29.5.0", + "jest-util": "^29.5.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.5.0.tgz", + "integrity": "sha512-x7Wolra5V0tt3wRs3/ts3S6ciSQVypgGQlJpz2rsdQYoUKxMxPNaoHMGJN6qAuPJqS+2iQ1ZUn5kl7HCyls84g==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/traverse": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.5.0", + "@jest/transform": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/babel__traverse": "^7.0.6", + "@types/prettier": "^2.1.5", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.5.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.5.0", + "jest-get-type": "^29.4.3", + "jest-matcher-utils": "^29.5.0", + "jest-message-util": "^29.5.0", + "jest-util": "^29.5.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.5.0", + "semver": "^7.3.5" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.5.0.tgz", + "integrity": "sha512-RYMgG/MTadOr5t8KdhejfvUU82MxsCu5MF6KuDUHl+NuwzUt+Sm6jJWxTJVrDR1j5M/gJVCPKQEpWXY+yIQ6lQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": 
"^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.5.0.tgz", + "integrity": "sha512-pC26etNIi+y3HV8A+tUGr/lph9B18GnzSRAkPaaZJIE1eFdiYm6/CewuiJQ8/RlfHd1u/8Ioi8/sJ+CmbA+zAQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.5.0", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.4.3", + "leven": "^3.1.0", + "pretty-format": "^29.5.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-watcher": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.5.0.tgz", + "integrity": "sha512-KmTojKcapuqYrKDpRwfqcQ3zjMlwu27SYext9pt4GlF5FUgB+7XE1mcCnSm6a4uUpFyQIkb6ZhzZvHl+jiBCiA==", + "dev": true, + "dependencies": { + "@jest/test-result": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.5.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.5.0.tgz", + "integrity": "sha512-NcrQnevGoSp4b5kg+akIpthoAFHxPBcb5P6mYPY0fUNT+sSvmtu6jlkEle3anczUKIKEbMxFimk9oTP/tpIPgA==", + "dev": true, + "dependencies": { + "@types/node": "*", + "jest-util": "^29.5.0", + "merge-stream": "^2.0.0", + 
"supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/js-sdsl": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/js-sdsl/-/js-sdsl-4.4.0.tgz", + "integrity": "sha512-FfVSdx6pJ41Oa+CF7RDaFmTnCaFhua+SNYQX74riGOpl96x+2jQCqEfQ2bnXu/5DPCqlRuiqyvTJM0Qjz26IVg==", + "dev": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/js-sdsl" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": 
"https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonpath": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/jsonpath/-/jsonpath-1.1.1.tgz", + "integrity": "sha512-l6Cg7jRpixfbgoWgkrl77dgEj8RPvND0wMH6TwQmi9Qs4TFfS9u5cUFnbeKTwj5ga5Y3BTGGNI28k117LJ009w==", + "dependencies": { + "esprima": "1.2.2", + "static-eval": "2.0.2", + "underscore": "1.12.1" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": 
"sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "dev": true + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": 
"sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "dev": true, + "dependencies": { + "semver": "^6.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": 
"sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": 
"sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/natural-compare-lite": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz", + "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==", + "dev": true + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true + }, + "node_modules/node-releases": { + "version": "2.0.12", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.12.tgz", + "integrity": "sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==", + "dev": true + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": 
"https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/optionator": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", + "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", + "dev": true, + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.3" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + 
"resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": 
true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "dev": true + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.5.tgz", + "integrity": "sha512-8V9+HQPupnaXMA23c5hvl69zXvTwTzyAYasnkb0Tts4XvO4CliqONMOnvlq26rkhLC3nWDFBJf73LU1e1VZLaQ==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/find-up": { + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/pretty-format": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.5.0.tgz", + "integrity": 
"sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==", + "dev": true, + "dependencies": { + "@jest/schemas": "^29.4.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, + "node_modules/punycode": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", + "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/pure-rand": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.2.tgz", + "integrity": "sha512-6Yg0ekpKICSjPswYOuC5sku/TSWaRYlA0qsXqJgM/d/4pLPHPuTxK7Nbf7jFKzAeedUhR8C7K9Uv63FBsSo8xQ==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", 
+ "url": "https://opencollective.com/fast-check" + } + ] + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/react-is": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", + "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==", + "dev": true + }, + "node_modules/reflect-metadata": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.1.13.tgz", + "integrity": "sha512-Ts1Y/anZELhSsjMcU605fU9RE4Oi3p5ORujwbIKXfWa+0Zxs510Qrmrce5/Jowq3cHSZSJqBjypxmHarc+vEWg==" + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.2", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz", + "integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==", + "dev": true, + "dependencies": { + "is-core-module": "^2.11.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-cwd/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz", + "integrity": "sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-parallel": { + 
"version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/semver": { + "version": "7.3.8", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz", + "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "dev": true }, - "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": 
"sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "devOptional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", "dev": true, "dependencies": { - "yallist": "^4.0.0" + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^2.0.0" }, "engines": { "node": ">=10" } }, - "node_modules/merge2": { - 
"version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "node_modules/stack-utils/node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", "dev": true, "engines": { - "node": ">= 8" + "node": ">=8" } }, - "node_modules/micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "node_modules/static-eval": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/static-eval/-/static-eval-2.0.2.tgz", + "integrity": "sha512-N/D219Hcr2bPjLxPiV+TQE++Tsmrady7TqAJugLy7Xk1EumfDWS/f5dtBbkRCGE7wKKXuYockQoj8Rm2/pVKyg==", + "dependencies": { + "escodegen": "^1.8.1" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": 
"https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + 
"engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-jest": { + "version": "29.1.0", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.0.tgz", + "integrity": "sha512-ZhNr7Z4PcYa+JjMl62ir+zPiNJfXJN6E8hSLnaUKhOgqcn8vb3e537cpkd0FuAfRK3sR1LSqM1MOhliXNgOFPA==", + "dev": true, + "dependencies": { + 
"bs-logger": "0.x", + "fast-json-stable-stringify": "2.x", + "jest-util": "^29.0.0", + "json5": "^2.2.3", + "lodash.memoize": "4.x", + "make-error": "1.x", + "semver": "7.x", + "yargs-parser": "^21.0.1" + }, + "bin": { + "ts-jest": "cli.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": ">=7.0.0-beta.0 <8", + "@jest/types": "^29.0.0", + "babel-jest": "^29.0.0", + "jest": "^29.0.0", + "typescript": ">=4.3 <6" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "@jest/types": { + "optional": true + }, + "babel-jest": { + "optional": true + }, + "esbuild": { + "optional": true + } + } + }, + "node_modules/ts-node": { + "version": "10.9.1", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", + "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==", + "dev": true, + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/tslib": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", + "integrity": 
"sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", + "dev": true + }, + "node_modules/tsutils": { + "version": "3.21.0", + "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz", + "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==", + "dev": true, + "dependencies": { + "tslib": "^1.8.1" + }, + "engines": { + "node": ">= 6" + }, + "peerDependencies": { + "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.3.tgz", + "integrity": "sha512-CIfGzTelbKNEnLpLdGFgdyKhG23CKdKgQPOBc+OUNrkJ2vr+KSzsSV5kq5iWhEQbok+quxgGzrAtGWCyU7tHnA==", "dev": true, - "dependencies": { - "braces": "^3.0.2", - "picomatch": "^2.3.1" + 
"bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" }, "engines": { - "node": ">=8.6" + "node": ">=4.2.0" } }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } + "node_modules/underscore": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.12.1.tgz", + "integrity": "sha512-hEQt0+ZLDVUMhebKxL4x1BTtDY7bavVofhZ9KZ4aI26X9SRaE+Y3m83XUL1UP2jn8ynjndwCCpEHdUG+9pP1Tw==" }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "node_modules/update-browserslist-db": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", + "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], "dependencies": { - "mime-db": "1.52.0" + "escalade": "^3.1.1", + "picocolors": "^1.0.0" }, - "engines": { - "node": ">= 0.6" + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" } }, - "node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "node_modules/uri-js": { + "version": "4.4.1", + 
"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", "dev": true, "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" + "punycode": "^2.1.0" } }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", "dev": true }, - "node_modules/natural-compare": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", - "dev": true + "node_modules/v8-to-istanbul": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.1.0.tgz", + "integrity": "sha512-6z3GW9x8G1gd+JIIgQQQxXuiJtCXeAjp6RaPEPLv62mH3iPHPxV6W3robxtCzNErRo6ZwTmzWhsbNvjyEBKzKA==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^1.6.0" + }, + "engines": { + "node": ">=10.12.0" + } }, - "node_modules/natural-compare-lite": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz", - "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==", + "node_modules/v8-to-istanbul/node_modules/convert-source-map": { + "version": "1.9.0", + "resolved": 
"https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", "dev": true }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", "dev": true, "dependencies": { - "wrappy": "1" + "makeerror": "1.0.12" } }, - "node_modules/optionator": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", - "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, "dependencies": { - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.3" + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" }, "engines": { - "node": ">= 0.8.0" + "node": ">= 8" } }, - "node_modules/p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "dependencies": { - "yocto-queue": "^0.1.0" - }, + "node_modules/word-wrap": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", + "integrity": 
"sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=0.10.0" } }, - "node_modules/p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "dev": true, "dependencies": { - "p-limit": "^3.0.2" + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" }, "engines": { "node": ">=10" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, - "node_modules/parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", "dev": true, "dependencies": { - "callsites": "^3.0.0" + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" }, "engines": { - "node": ">=6" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, - "node_modules/path-exists": { - 
"version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", "dev": true, "engines": { - "node": ">=8" + "node": ">=10" } }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", "dev": true, + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, "engines": { - "node": ">=0.10.0" + "node": ">=12" } }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": 
"sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", "dev": true, "engines": { - "node": ">=8" + "node": ">=12" } }, - "node_modules/path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", "dev": true, "engines": { - "node": ">=8" + "node": ">=6" } }, - "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "dev": true, "engines": { - "node": ">=8.6" + "node": ">=10" }, "funding": { - "url": "https://github.com/sponsors/jonschlinkert" + "url": "https://github.com/sponsors/sindresorhus" + } + } + }, + "dependencies": { + "@ampproject/remapping": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "dev": true, + "requires": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" } }, - "node_modules/prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": 
"sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "@babel/code-frame": { + "version": "7.21.4", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.21.4.tgz", + "integrity": "sha512-LYvhNKfwWSPpocw8GI7gpK2nq3HSDuEPC/uSYaALSJu9xjsalaaYFOq0Pwt5KmVqwEbZlDu81aLXwBOmD/Fv9g==", "dev": true, - "engines": { - "node": ">= 0.8.0" + "requires": { + "@babel/highlight": "^7.18.6" } }, - "node_modules/proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + "@babel/compat-data": { + "version": "7.22.3", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.3.tgz", + "integrity": "sha512-aNtko9OPOwVESUFp3MZfD8Uzxl7JzSeJpd7npIoxCasU37PFbAQRpKglkaKwlHOyeJdrREpo8TW8ldrkYWwvIQ==", + "dev": true }, - "node_modules/punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "@babel/core": { + "version": "7.22.1", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.1.tgz", + "integrity": "sha512-Hkqu7J4ynysSXxmAahpN1jjRwVJ+NdpraFLIWflgjpVob3KNyK3/tIUc7Q7szed8WMp0JNa7Qtd1E9Oo22F9gA==", "dev": true, - "engines": { - "node": ">=6" + "requires": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.21.4", + "@babel/generator": "^7.22.0", + "@babel/helper-compilation-targets": "^7.22.1", + "@babel/helper-module-transforms": "^7.22.1", + "@babel/helpers": "^7.22.0", + "@babel/parser": "^7.22.0", + "@babel/template": "^7.21.9", + "@babel/traverse": "^7.22.1", + "@babel/types": "^7.22.0", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.2", + "semver": "^6.3.0" + }, + 
"dependencies": { + "convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", + "dev": true + }, + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } } }, - "node_modules/queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "@babel/generator": { + "version": "7.22.3", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.3.tgz", + "integrity": "sha512-C17MW4wlk//ES/CJDL51kPNwl+qiBQyN7b9SKyVp11BLGFeSPoVaHrv+MNt8jwQFhQWowW88z1eeBx3pFz9v8A==", "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" + "requires": { + "@babel/types": "^7.22.3", + "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", + "jsesc": "^2.5.1" + } + }, + "@babel/helper-compilation-targets": { + "version": "7.22.1", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.1.tgz", + "integrity": "sha512-Rqx13UM3yVB5q0D/KwQ8+SPfX/+Rnsy1Lw1k/UwOC4KC6qrzIQoY3lYnBu5EHKBlEHHcj0M0W8ltPSkD8rqfsQ==", + "dev": true, + "requires": { + "@babel/compat-data": "^7.22.0", + "@babel/helper-validator-option": "^7.21.0", + "browserslist": "^4.21.3", + "lru-cache": "^5.1.1", + "semver": "^6.3.0" + }, + "dependencies": { + "lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": 
"sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "requires": { + "yallist": "^3.0.2" + } }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true }, - { - "type": "consulting", - "url": "https://feross.org/support" + "yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true } - ] + } }, - "node_modules/reflect-metadata": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.1.13.tgz", - "integrity": "sha512-Ts1Y/anZELhSsjMcU605fU9RE4Oi3p5ORujwbIKXfWa+0Zxs510Qrmrce5/Jowq3cHSZSJqBjypxmHarc+vEWg==" + "@babel/helper-environment-visitor": { + "version": "7.22.1", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.1.tgz", + "integrity": "sha512-Z2tgopurB/kTbidvzeBrc2To3PUP/9i5MUe+fU6QJCQDyPwSH2oRapkLw3KGECDYSjhQZCNxEvNvZlLw8JjGwA==", + "dev": true }, - "node_modules/resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "@babel/helper-function-name": { + "version": "7.21.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.21.0.tgz", + "integrity": "sha512-HfK1aMRanKHpxemaY2gqBmL04iAPOPRj7DxtNbiDOrJK+gdwkiNRVpCpUJYbUT+aZyemKN8brqTOxzCaG6ExRg==", + "dev": true, + "requires": { + "@babel/template": "^7.20.7", + "@babel/types": "^7.21.0" + } + }, + 
"@babel/helper-hoist-variables": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", + "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "dev": true, + "requires": { + "@babel/types": "^7.18.6" + } + }, + "@babel/helper-module-imports": { + "version": "7.21.4", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.21.4.tgz", + "integrity": "sha512-orajc5T2PsRYUN3ZryCEFeMDYwyw09c/pZeaQEZPH0MpKzSvn3e0uXsDBu3k03VI+9DBiRo+l22BfKTpKwa/Wg==", + "dev": true, + "requires": { + "@babel/types": "^7.21.4" + } + }, + "@babel/helper-module-transforms": { + "version": "7.22.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.1.tgz", + "integrity": "sha512-dxAe9E7ySDGbQdCVOY/4+UcD8M9ZFqZcZhSPsPacvCG4M+9lwtDDQfI2EoaSvmf7W/8yCBkGU0m7Pvt1ru3UZw==", + "dev": true, + "requires": { + "@babel/helper-environment-visitor": "^7.22.1", + "@babel/helper-module-imports": "^7.21.4", + "@babel/helper-simple-access": "^7.21.5", + "@babel/helper-split-export-declaration": "^7.18.6", + "@babel/helper-validator-identifier": "^7.19.1", + "@babel/template": "^7.21.9", + "@babel/traverse": "^7.22.1", + "@babel/types": "^7.22.0" + } + }, + "@babel/helper-plugin-utils": { + "version": "7.21.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.21.5.tgz", + "integrity": "sha512-0WDaIlXKOX/3KfBK/dwP1oQGiPh6rjMkT7HIRv7i5RR2VUMwrx5ZL0dwBkKx7+SW1zwNdgjHd34IMk5ZjTeHVg==", + "dev": true + }, + "@babel/helper-simple-access": { + "version": "7.21.5", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.21.5.tgz", + "integrity": "sha512-ENPDAMC1wAjR0uaCUwliBdiSl1KBJAVnMTzXqi64c2MG8MPR6ii4qf7bSXDqSFbr4W6W028/rf5ivoHop5/mkg==", "dev": true, - "engines": { - "node": ">=4" + "requires": { + 
"@babel/types": "^7.21.5" } }, - "node_modules/reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "@babel/helper-split-export-declaration": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", + "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", "dev": true, - "engines": { - "iojs": ">=1.0.0", - "node": ">=0.10.0" + "requires": { + "@babel/types": "^7.18.6" } }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "@babel/helper-string-parser": { + "version": "7.21.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.21.5.tgz", + "integrity": "sha512-5pTUx3hAJaZIdW99sJ6ZUUgWq/Y+Hja7TowEnLNMm1VivRgZQL3vpBY3qUACVsvw+yQU6+YgfBVmcbLaZtrA1w==", + "dev": true + }, + "@babel/helper-validator-identifier": { + "version": "7.19.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", + "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==", + "dev": true + }, + "@babel/helper-validator-option": { + "version": "7.21.0", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.21.0.tgz", + "integrity": "sha512-rmL/B8/f0mKS2baE9ZpyTcTavvEuWhTTW8amjzXNvYG4AwBsqTLikfXsEofsJEfKHf+HQVQbFOHy6o+4cnC/fQ==", + "dev": true + }, + "@babel/helpers": { + "version": "7.22.3", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.3.tgz", + "integrity": 
"sha512-jBJ7jWblbgr7r6wYZHMdIqKc73ycaTcCaWRq4/2LpuPHcx7xMlZvpGQkOYc9HeSjn6rcx15CPlgVcBtZ4WZJ2w==", "dev": true, - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "requires": { + "@babel/template": "^7.21.9", + "@babel/traverse": "^7.22.1", + "@babel/types": "^7.22.3" } }, - "node_modules/run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "@babel/highlight": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", + "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" + "requires": { + "@babel/helper-validator-identifier": "^7.18.6", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "requires": { + "color-convert": "^1.9.0" + } }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" + "chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } }, - { - "type": "consulting", - "url": "https://feross.org/support" + "color-convert": { + "version": "1.9.3", + "resolved": 
"https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "requires": { + "has-flag": "^3.0.0" + } } - ], - "dependencies": { - "queue-microtask": "^1.2.2" } }, - "node_modules/semver": { - "version": "7.3.8", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.8.tgz", - "integrity": "sha512-NB1ctGL5rlHrPJtFDVIVzTyQylMLu9N9VICA6HSFJo8MCGVTMW6gfpicwKmmK/dAjTOrqu5l63JJOpDSrAis3A==", + "@babel/parser": { + "version": "7.22.4", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.4.tgz", + "integrity": "sha512-VLLsx06XkEYqBtE5YGPwfSGwfrjnyPP5oiGty3S8pQLFDFLaS8VwWSIxkTXpcvr5zeYLE6+MBNl2npl/YnfofA==", + "dev": true + }, + "@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" } }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", "dev": true, - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" } }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", "dev": true, - "engines": { - "node": ">=8" + "requires": { + "@babel/helper-plugin-utils": "^7.12.13" } }, - "node_modules/slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": 
"sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", "dev": true, - "engines": { - "node": ">=8" + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" } }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", "dev": true, - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" } }, - "node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "@babel/plugin-syntax-jsx": { + "version": "7.21.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.21.4.tgz", + "integrity": "sha512-5hewiLct5OKyh6PLKEYaFclcqtIgCb6bmELouxjF6up5q3Sov7rOayW4RwhbaBL0dit8rA80GNfY+UuDp2mBbQ==", "dev": true, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "requires": { + "@babel/helper-plugin-utils": "^7.20.2" } }, - "node_modules/supports-color": { - "version": "7.2.0", - "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" } }, - "node_modules/text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", - "dev": true + "@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", "dev": true, - "dependencies": { - "is-number": "^7.0.0" - }, - 
"engines": { - "node": ">=8.0" + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" } }, - "node_modules/tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "dev": true + "@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } }, - "node_modules/tsutils": { - "version": "3.21.0", - "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz", - "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==", + "@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", "dev": true, - "dependencies": { - "tslib": "^1.8.1" - }, - "engines": { - "node": ">= 6" - }, - "peerDependencies": { - "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta" + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" } }, - "node_modules/type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", "dev": true, - "dependencies": { - "prelude-ls": "^1.2.1" - }, - "engines": { - "node": ">= 0.8.0" + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" } }, - "node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "requires": { + "@babel/helper-plugin-utils": "^7.14.5" } }, - "node_modules/typescript": { - "version": "4.9.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.3.tgz", - "integrity": "sha512-CIfGzTelbKNEnLpLdGFgdyKhG23CKdKgQPOBc+OUNrkJ2vr+KSzsSV5kq5iWhEQbok+quxgGzrAtGWCyU7tHnA==", + "@babel/plugin-syntax-typescript": { + "version": "7.21.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.21.4.tgz", + "integrity": "sha512-xz0D39NvhQn4t4RNsHmDnnsaQizIlUkdtYvLs8La1BlfjQ6JEwxkJGeqJMW2tAXx+q6H+WFuUTXNdYVpEya0YA==", "dev": true, - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=4.2.0" + "requires": { + "@babel/helper-plugin-utils": "^7.20.2" } }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": 
"sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "@babel/template": { + "version": "7.21.9", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.21.9.tgz", + "integrity": "sha512-MK0X5k8NKOuWRamiEfc3KEJiHMTkGZNUjzMipqCGDDc6ijRl/B7RGSKVGncu4Ro/HdyzzY6cmoXuKI2Gffk7vQ==", "dev": true, - "dependencies": { - "punycode": "^2.1.0" + "requires": { + "@babel/code-frame": "^7.21.4", + "@babel/parser": "^7.21.9", + "@babel/types": "^7.21.5" } }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "@babel/traverse": { + "version": "7.22.4", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.4.tgz", + "integrity": "sha512-Tn1pDsjIcI+JcLKq1AVlZEr4226gpuAQTsLMorsYg9tuS/kG7nuwwJ4AB8jfQuEgb/COBwR/DqJxmoiYFu5/rQ==", "dev": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" + "requires": { + "@babel/code-frame": "^7.21.4", + "@babel/generator": "^7.22.3", + "@babel/helper-environment-visitor": "^7.22.1", + "@babel/helper-function-name": "^7.21.0", + "@babel/helper-hoist-variables": "^7.18.6", + "@babel/helper-split-export-declaration": "^7.18.6", + "@babel/parser": "^7.22.4", + "@babel/types": "^7.22.4", + "debug": "^4.1.0", + "globals": "^11.1.0" }, - "engines": { - "node": ">= 8" + "dependencies": { + "globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true + } } }, - "node_modules/word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": 
"sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "@babel/types": { + "version": "7.22.4", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.4.tgz", + "integrity": "sha512-Tx9x3UBHTTsMSW85WB2kphxYQVvrZ/t1FxD88IpSgIjiUJlCm9z+xWIDwyo1vffTwSqteqyznB8ZE9vYYk16zA==", "dev": true, - "engines": { - "node": ">=0.10.0" + "requires": { + "@babel/helper-string-parser": "^7.21.5", + "@babel/helper-validator-identifier": "^7.19.1", + "to-fast-properties": "^2.0.0" } }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true - }, - "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", "dev": true - }, - "node_modules/yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + }, + "@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", "dev": true, - "engines": { - "node": ">=10" + "requires": { + "@jridgewell/trace-mapping": "0.3.9" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + 
"dependencies": { + "@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "requires": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + } } - } - }, - "dependencies": { + }, "@eslint-community/eslint-utils": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", @@ -1772,6 +5631,362 @@ "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", "dev": true }, + "@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "requires": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "dependencies": { + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true + }, + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + 
"locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + "p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + }, + "resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true + } + } + }, + "@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true + }, + "@jest/console": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.5.0.tgz", + "integrity": 
"sha512-NEpkObxPwyw/XxZVLPmAGKE89IQRp4puc6IQRPru6JKd1M3fW9v1xM1AnzIJE65hbCkzQAdnL8P47e9hzhiYLQ==", + "dev": true, + "requires": { + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.5.0", + "jest-util": "^29.5.0", + "slash": "^3.0.0" + } + }, + "@jest/core": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.5.0.tgz", + "integrity": "sha512-28UzQc7ulUrOQw1IsN/kv1QES3q2kkbl/wGslyhAclqZ/8cMdB5M68BffkIdSJgKBUt50d3hbwJ92XESlE7LiQ==", + "dev": true, + "requires": { + "@jest/console": "^29.5.0", + "@jest/reporters": "^29.5.0", + "@jest/test-result": "^29.5.0", + "@jest/transform": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.5.0", + "jest-config": "^29.5.0", + "jest-haste-map": "^29.5.0", + "jest-message-util": "^29.5.0", + "jest-regex-util": "^29.4.3", + "jest-resolve": "^29.5.0", + "jest-resolve-dependencies": "^29.5.0", + "jest-runner": "^29.5.0", + "jest-runtime": "^29.5.0", + "jest-snapshot": "^29.5.0", + "jest-util": "^29.5.0", + "jest-validate": "^29.5.0", + "jest-watcher": "^29.5.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.5.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + } + }, + "@jest/environment": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.5.0.tgz", + "integrity": "sha512-5FXw2+wD29YU1d4I2htpRX7jYnAyTRjP2CsXQdo9SAM8g3ifxWPSV0HnClSn71xwctr0U3oZIIH+dtbfmnbXVQ==", + "dev": true, + "requires": { + "@jest/fake-timers": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "jest-mock": "^29.5.0" + } + }, + "@jest/expect": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.5.0.tgz", + "integrity": "sha512-PueDR2HGihN3ciUNGr4uelropW7rqUfTiOn+8u0leg/42UhblPxHkfoh0Ruu3I9Y1962P3u2DY4+h7GVTSVU6g==", + 
"dev": true, + "requires": { + "expect": "^29.5.0", + "jest-snapshot": "^29.5.0" + } + }, + "@jest/expect-utils": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.5.0.tgz", + "integrity": "sha512-fmKzsidoXQT2KwnrwE0SQq3uj8Z763vzR8LnLBwC2qYWEFpjX8daRsk6rHUM1QvNlEW/UJXNXm59ztmJJWs2Mg==", + "dev": true, + "requires": { + "jest-get-type": "^29.4.3" + } + }, + "@jest/fake-timers": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.5.0.tgz", + "integrity": "sha512-9ARvuAAQcBwDAqOnglWq2zwNIRUDtk/SCkp/ToGEhFv5r86K21l+VEs0qNTaXtyiY0lEePl3kylijSYJQqdbDg==", + "dev": true, + "requires": { + "@jest/types": "^29.5.0", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.5.0", + "jest-mock": "^29.5.0", + "jest-util": "^29.5.0" + } + }, + "@jest/globals": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.5.0.tgz", + "integrity": "sha512-S02y0qMWGihdzNbUiqSAiKSpSozSuHX5UYc7QbnHP+D9Lyw8DgGGCinrN9uSuHPeKgSSzvPom2q1nAtBvUsvPQ==", + "dev": true, + "requires": { + "@jest/environment": "^29.5.0", + "@jest/expect": "^29.5.0", + "@jest/types": "^29.5.0", + "jest-mock": "^29.5.0" + } + }, + "@jest/reporters": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.5.0.tgz", + "integrity": "sha512-D05STXqj/M8bP9hQNSICtPqz97u7ffGzZu+9XLucXhkOFBqKcXe04JLZOgIekOxdb73MAoBUFnqvf7MCpKk5OA==", + "dev": true, + "requires": { + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.5.0", + "@jest/test-result": "^29.5.0", + "@jest/transform": "^29.5.0", + "@jest/types": "^29.5.0", + "@jridgewell/trace-mapping": "^0.3.15", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^5.1.0", + "istanbul-lib-report": "^3.0.0", + 
"istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.5.0", + "jest-util": "^29.5.0", + "jest-worker": "^29.5.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + } + }, + "@jest/schemas": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.4.3.tgz", + "integrity": "sha512-VLYKXQmtmuEz6IxJsrZwzG9NvtkQsWNnWMsKxqWNu3+CnfzJQhp0WDDKWLVV9hLKr0l3SLLFRqcYHjhtyuDVxg==", + "dev": true, + "requires": { + "@sinclair/typebox": "^0.25.16" + } + }, + "@jest/source-map": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.4.3.tgz", + "integrity": "sha512-qyt/mb6rLyd9j1jUts4EQncvS6Yy3PM9HghnNv86QBlV+zdL2inCdK1tuVlL+J+lpiw2BI67qXOrX3UurBqQ1w==", + "dev": true, + "requires": { + "@jridgewell/trace-mapping": "^0.3.15", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + } + }, + "@jest/test-result": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.5.0.tgz", + "integrity": "sha512-fGl4rfitnbfLsrfx1uUpDEESS7zM8JdgZgOCQuxQvL1Sn/I6ijeAVQWGfXI9zb1i9Mzo495cIpVZhA0yr60PkQ==", + "dev": true, + "requires": { + "@jest/console": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + } + }, + "@jest/test-sequencer": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.5.0.tgz", + "integrity": "sha512-yPafQEcKjkSfDXyvtgiV4pevSeyuA6MQr6ZIdVkWJly9vkqjnFfcfhRQqpD5whjoU8EORki752xQmjaqoFjzMQ==", + "dev": true, + "requires": { + "@jest/test-result": "^29.5.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.5.0", + "slash": "^3.0.0" + } + }, + "@jest/transform": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.5.0.tgz", + "integrity": 
"sha512-8vbeZWqLJOvHaDfeMuoHITGKSz5qWc9u04lnWrQE3VyuSw604PzQM824ZeX9XSjUCeDiE3GuxZe5UKa8J61NQw==", + "dev": true, + "requires": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.5.0", + "@jridgewell/trace-mapping": "^0.3.15", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.5.0", + "jest-regex-util": "^29.4.3", + "jest-util": "^29.5.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + } + }, + "@jest/types": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.5.0.tgz", + "integrity": "sha512-qbu7kN6czmVRc3xWFQcAN03RAUamgppVUdXrvl1Wr3jlNF93o9mJbGcDWrwGB6ht44u7efB1qCFgVQmca24Uog==", + "dev": true, + "requires": { + "@jest/schemas": "^29.4.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + } + }, + "@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "dev": true, + "requires": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + } + }, + "@jridgewell/resolve-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", + "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", + "dev": true + }, + "@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + 
"dev": true + }, + "@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", + "dev": true + }, + "@jridgewell/trace-mapping": { + "version": "0.3.18", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz", + "integrity": "sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==", + "dev": true, + "requires": { + "@jridgewell/resolve-uri": "3.1.0", + "@jridgewell/sourcemap-codec": "1.4.14" + }, + "dependencies": { + "@jridgewell/sourcemap-codec": { + "version": "1.4.14", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", + "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==", + "dev": true + } + } + }, "@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -1798,24 +6013,179 @@ "fastq": "^1.6.0" } }, + "@sinclair/typebox": { + "version": "0.25.24", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz", + "integrity": "sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ==", + "dev": true + }, + "@sinonjs/commons": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.0.tgz", + "integrity": "sha512-jXBtWAF4vmdNmZgD5FoKsVLv3rPgDnLgPbU84LIJ3otV44vJlDRokVng5v8NFJdCf/da9legHcKaRuZs4L7faA==", + "dev": true, + "requires": { + "type-detect": "4.0.8" + } + }, + "@sinonjs/fake-timers": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.2.0.tgz", + "integrity": 
"sha512-OPwQlEdg40HAj5KNF8WW6q2KG4Z+cBCZb3m4ninfTZKaBmbIJodviQsDBoYMPHkOyJJMHnOJo5j2+LKDOhOACg==", + "dev": true, + "requires": { + "@sinonjs/commons": "^3.0.0" + } + }, + "@tsconfig/node10": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.9.tgz", + "integrity": "sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==", + "dev": true + }, + "@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true + }, + "@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true + }, + "@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true + }, + "@types/babel__core": { + "version": "7.20.1", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.1.tgz", + "integrity": "sha512-aACu/U/omhdk15O4Nfb+fHgH/z3QsfQzpnvRZhYhThms83ZnAOZz7zZAWO7mn2yyNQaA4xTO8GLK3uqFU4bYYw==", + "dev": true, + "requires": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "@types/babel__generator": { + "version": "7.6.4", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.4.tgz", + "integrity": "sha512-tFkciB9j2K755yrTALxD44McOrk+gfpIpvC3sxHjRawj6PfnQxrse4Clq5y/Rq+G3mrBurMax/lG8Qn2t9mSsg==", + "dev": true, + "requires": { + "@babel/types": "^7.0.0" + } + }, + 
"@types/babel__template": { + "version": "7.4.1", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.1.tgz", + "integrity": "sha512-azBFKemX6kMg5Io+/rdGT0dkGreboUVR0Cdm3fz9QJWpaQGJRQXl7C+6hOTCZcMll7KFyEQpgbYI2lHdsS4U7g==", + "dev": true, + "requires": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "@types/babel__traverse": { + "version": "7.20.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.0.tgz", + "integrity": "sha512-TBOjqAGf0hmaqRwpii5LLkJLg7c6OMm4nHLmpsUxwk9bBHtoTC6dAHdVWdGv4TBxj2CZOZY8Xfq8WmfoVi7n4Q==", + "dev": true, + "requires": { + "@babel/types": "^7.20.7" + } + }, + "@types/graceful-fs": { + "version": "4.1.6", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.6.tgz", + "integrity": "sha512-Sig0SNORX9fdW+bQuTEovKj3uHcUL6LQKbCrrqb1X7J6/ReAbhCXRAhc+SMejhLELFj2QcyuxmUooZ4bt5ReSw==", + "dev": true, + "requires": { + "@types/node": "*" + } + }, + "@types/istanbul-lib-coverage": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", + "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==", + "dev": true + }, + "@types/istanbul-lib-report": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", + "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==", + "dev": true, + "requires": { + "@types/istanbul-lib-coverage": "*" + } + }, + "@types/istanbul-reports": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz", + "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==", + "dev": true, + "requires": { + "@types/istanbul-lib-report": "*" + } + }, 
"@types/json-schema": { "version": "7.0.11", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", "dev": true }, + "@types/jsonpath": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@types/jsonpath/-/jsonpath-0.2.0.tgz", + "integrity": "sha512-v7qlPA0VpKUlEdhghbDqRoKMxFB3h3Ch688TApBJ6v+XLDdvWCGLJIYiPKGZnS6MAOie+IorCfNYVHOPIHSWwQ==", + "dev": true + }, "@types/node": { "version": "18.11.9", "resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.9.tgz", "integrity": "sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg==", "dev": true }, + "@types/prettier": { + "version": "2.7.3", + "resolved": "https://registry.npmjs.org/@types/prettier/-/prettier-2.7.3.tgz", + "integrity": "sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA==", + "dev": true + }, "@types/semver": { "version": "7.3.13", "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.3.13.tgz", "integrity": "sha512-21cFJr9z3g5dW8B0CVI9g2O9beqaThGQ6ZFBqHfwhzLDKUxaqTIy3vnfah/UPkfOiF2pLq+tGz+W8RyCskuslw==", "dev": true }, + "@types/stack-utils": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.1.tgz", + "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==", + "dev": true + }, + "@types/yargs": { + "version": "17.0.24", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz", + "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==", + "dev": true, + "requires": { + "@types/yargs-parser": "*" + } + }, + "@types/yargs-parser": { + "version": "21.0.0", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", + "integrity": 
"sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==", + "dev": true + }, "@typescript-eslint/eslint-plugin": { "version": "5.56.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.56.0.tgz", @@ -1928,6 +6298,12 @@ "dev": true, "requires": {} }, + "acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "dev": true + }, "ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", @@ -1940,6 +6316,23 @@ "uri-js": "^4.2.2" } }, + "ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "requires": { + "type-fest": "^0.21.3" + }, + "dependencies": { + "type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true + } + } + }, "ansi-regex": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", @@ -1955,6 +6348,22 @@ "color-convert": "^2.0.1" } }, + "anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "requires": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + } + }, + "arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + 
"dev": true + }, "argparse": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", @@ -1982,6 +6391,76 @@ "proxy-from-env": "^1.1.0" } }, + "babel-jest": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.5.0.tgz", + "integrity": "sha512-mA4eCDh5mSo2EcA9xQjVTpmbbNk32Zb3Q3QFQsNhaK56Q+yoXowzFodLux30HRgyOho5rsQ6B0P9QpMkvvnJ0Q==", + "dev": true, + "requires": { + "@jest/transform": "^29.5.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.5.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + } + }, + "babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "requires": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + } + }, + "babel-plugin-jest-hoist": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.5.0.tgz", + "integrity": "sha512-zSuuuAlTMT4mzLj2nPnUm6fsE6270vdOfnpbJ+RmruU75UhLFvL0N2NgI7xpeS7NaB6hGqmd5pVpGTDYvi4Q3w==", + "dev": true, + "requires": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + } + }, + "babel-preset-current-node-syntax": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz", + "integrity": "sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==", + "dev": true, + "requires": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + 
"@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.8.3", + "@babel/plugin-syntax-import-meta": "^7.8.3", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.8.3", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-top-level-await": "^7.8.3" + } + }, + "babel-preset-jest": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.5.0.tgz", + "integrity": "sha512-JOMloxOqdiBSxMAzjRaH023/vvcaSaec49zvg+2LmNsktC7ei39LTJGw02J+9uUtTZUq6xbLyJ4dxe9sSmIuAg==", + "dev": true, + "requires": { + "babel-plugin-jest-hoist": "^29.5.0", + "babel-preset-current-node-syntax": "^1.0.0" + } + }, "balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", @@ -2007,12 +6486,60 @@ "fill-range": "^7.0.1" } }, + "browserslist": { + "version": "4.21.7", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.7.tgz", + "integrity": "sha512-BauCXrQ7I2ftSqd2mvKHGo85XR0u7Ru3C/Hxsy/0TkfCtjrmAbPdzLGasmoiBxplpDXlPvdjX9u7srIMfgasNA==", + "dev": true, + "requires": { + "caniuse-lite": "^1.0.30001489", + "electron-to-chromium": "^1.4.411", + "node-releases": "^2.0.12", + "update-browserslist-db": "^1.0.11" + } + }, + "bs-logger": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz", + "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==", + "dev": true, + "requires": { + "fast-json-stable-stringify": "2.x" + } + }, + "bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + 
"integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "requires": { + "node-int64": "^0.4.0" + } + }, + "buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true + }, "callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true }, + "camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true + }, + "caniuse-lite": { + "version": "1.0.30001492", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001492.tgz", + "integrity": "sha512-2efF8SAZwgAX1FJr87KWhvuJxnGJKOnctQa8xLOskAXNXq8oiuqgl6u1kk3fFpsp3GgvzlRjiK1sl63hNtFADw==", + "dev": true + }, "chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", @@ -2023,11 +6550,52 @@ "supports-color": "^7.1.0" } }, + "char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true + }, + "ci-info": { + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", + "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", + "dev": true + }, + "cjs-module-lexer": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.2.tgz", + "integrity": 
"sha512-cOU9usZw8/dXIXKtwa8pM0OTJQuJkxMN6w30csNRUerHfeQ5R6U3kkU/FtJeIf3M202OHfY2U8ccInBG7/xogA==", + "dev": true + }, "class-transformer": { "version": "0.5.1", "resolved": "https://registry.npmjs.org/class-transformer/-/class-transformer-0.5.1.tgz", "integrity": "sha512-SQa1Ws6hUbfC98vKGxZH3KFY0Y1lm5Zm0SY8XX9zbK7FJCyVEac3ATW0RIpwzW+oOfmHE5PMPufDG9hCfoEOMw==" }, + "cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "requires": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + } + }, + "co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true + }, + "collect-v8-coverage": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.1.tgz", + "integrity": "sha512-iBPtljfCNcTKNAto0KEtDfZ3qzjJvqE3aTGZsbhjSBlorqpXJlaWWtPO35D+ZImoC3KWejX64o+yPGxhWSTzfg==", + "dev": true + }, "color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", @@ -2057,6 +6625,18 @@ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "dev": true }, + "convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, + "create-require": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + 
"dev": true + }, "cross-spawn": { "version": "7.0.3", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", @@ -2077,10 +6657,21 @@ "ms": "2.1.2" } }, + "dedent": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-0.7.0.tgz", + "integrity": "sha512-Q6fKUPqnAHAyhiUgFU7BUzLiv0kd8saH9al7tnu5Q/okj6dnupxyTgFIBjVzJATdfIAm9NAsvXNzjaKa+bxVyA==", + "dev": true + }, "deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==" + }, + "deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", "dev": true }, "delayed-stream": { @@ -2088,6 +6679,24 @@ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==" }, + "detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true + }, + "diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true + }, + "diff-sequences": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.4.3.tgz", + "integrity": "sha512-ofrBgwpPhCD85kMKtE9RYFFq6OC1A89oW2vvgWZNCwxrUpRUILopY7lsYyMDSjc8g6U6aiO0Qubg6r4Wgt5ZnA==", + "dev": true + }, 
"dir-glob": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", @@ -2106,12 +6715,99 @@ "esutils": "^2.0.2" } }, + "electron-to-chromium": { + "version": "1.4.414", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.414.tgz", + "integrity": "sha512-RRuCvP6ekngVh2SAJaOKT/hxqc9JAsK+Pe0hP5tGQIfonU2Zy9gMGdJ+mBdyl/vNucMG6gkXYtuM4H/1giws5w==", + "dev": true + }, + "emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "requires": { + "is-arrayish": "^0.2.1" + } + }, + "escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "dev": true + }, "escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", "dev": true }, + "escodegen": { + "version": "1.14.3", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.14.3.tgz", + "integrity": "sha512-qFcX0XJkdg+PB3xjZZG/wKSuT1PnQWx57+TVSjIMmILd2yC/6ByYElPwJnslDsuWuSAp4AwJGumarAAmJch5Kw==", + "requires": { + 
"esprima": "^4.0.1", + "estraverse": "^4.2.0", + "esutils": "^2.0.2", + "optionator": "^0.8.1", + "source-map": "~0.6.1" + }, + "dependencies": { + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + }, + "levn": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==", + "requires": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + } + }, + "optionator": { + "version": "0.8.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", + "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "requires": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + } + }, + "prelude-ls": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==" + }, + "type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==", + "requires": { + "prelude-ls": "~1.1.2" + } + } + } + }, "eslint": { "version": "8.36.0", "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.36.0.tgz", @@ -2205,6 +6901,11 @@ "eslint-visitor-keys": "^3.3.0" } }, + "esprima": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-1.2.2.tgz", + "integrity": "sha512-+JpPZam9w5DuJ3Q67SqsMGtiHKENSMRVoxvArfJZK01/BfLEObtZ6orJa/MtoGNR/rfMgp5837T41PAmTwAv/A==" + }, "esquery": { 
"version": "1.5.0", "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", @@ -2242,15 +6943,49 @@ "estraverse": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==" }, "esutils": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==" + }, + "execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "requires": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + } + }, + "exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", "dev": true }, + "expect": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.5.0.tgz", + "integrity": "sha512-yM7xqUrCO2JdpFo4XpM82t+PJBFybdqoQuJLDGeDX2ij8NZzqRHyu3Hp188/JX7SWqud+7t4MUdvcgGBICMHZg==", + "dev": true, + "requires": { + "@jest/expect-utils": "^29.5.0", + "jest-get-type": "^29.4.3", + "jest-matcher-utils": "^29.5.0", + "jest-message-util": "^29.5.0", + "jest-util": "^29.5.0" + } + }, "fast-deep-equal": { "version": "3.1.3", 
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -2290,8 +7025,7 @@ "fast-levenshtein": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", - "dev": true + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==" }, "fastq": { "version": "1.15.0", @@ -2302,6 +7036,15 @@ "reusify": "^1.0.4" } }, + "fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "requires": { + "bser": "2.1.1" + } + }, "file-entry-cache": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", @@ -2367,6 +7110,43 @@ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", "dev": true }, + "fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "optional": true + }, + "function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "dev": true + }, + "gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true + }, + "get-caller-file": { + "version": "2.0.5", + "resolved": 
"https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true + }, + "get-package-type": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true + }, + "get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true + }, "glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", @@ -2413,18 +7193,45 @@ "slash": "^3.0.0" } }, + "graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true + }, "grapheme-splitter": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/grapheme-splitter/-/grapheme-splitter-1.0.4.tgz", "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", "dev": true }, + "has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dev": true, + "requires": { + "function-bind": "^1.1.1" + } + }, "has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "dev": true }, + "html-escaper": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, + "human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true + }, "ignore": { "version": "5.2.4", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", @@ -2441,6 +7248,16 @@ "resolve-from": "^4.0.0" } }, + "import-local": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", + "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", + "dev": true, + "requires": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + } + }, "imurmurhash": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", @@ -2463,12 +7280,39 @@ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "dev": true }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true + }, + "is-core-module": { + "version": "2.12.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz", + "integrity": "sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==", + "dev": true, + "requires": { + "has": "^1.0.3" + } + }, "is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": 
true }, + "is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true + }, + "is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + "dev": true + }, "is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -2490,11 +7334,505 @@ "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", "dev": true }, - "isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true + "is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "istanbul-lib-coverage": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz", + "integrity": "sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==", + "dev": true + }, + "istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": 
"sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "requires": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "istanbul-lib-report": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", + "integrity": "sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==", + "dev": true, + "requires": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^3.0.0", + "supports-color": "^7.1.0" + } + }, + "istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "requires": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + } + }, + "istanbul-reports": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.5.tgz", + "integrity": "sha512-nUsEMa9pBt/NOHqbcbeJEgqIlY/K7rVWUX6Lql2orY5e9roQOthbR3vtY4zzf2orPELg80fnxxk9zUyPlgwD1w==", + "dev": true, + "requires": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + } + }, + "jest": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.5.0.tgz", + "integrity": "sha512-juMg3he2uru1QoXX078zTa7pO85QyB9xajZc6bU+d9yEGwrKX6+vGmJQ3UdVZsvTEUARIdObzH68QItim6OSSQ==", + "dev": true, + "requires": { + "@jest/core": "^29.5.0", + "@jest/types": "^29.5.0", + 
"import-local": "^3.0.2", + "jest-cli": "^29.5.0" + } + }, + "jest-changed-files": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.5.0.tgz", + "integrity": "sha512-IFG34IUMUaNBIxjQXF/iu7g6EcdMrGRRxaUSw92I/2g2YC6vCdTltl4nHvt7Ci5nSJwXIkCu8Ka1DKF+X7Z1Ag==", + "dev": true, + "requires": { + "execa": "^5.0.0", + "p-limit": "^3.1.0" + } + }, + "jest-circus": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.5.0.tgz", + "integrity": "sha512-gq/ongqeQKAplVxqJmbeUOJJKkW3dDNPY8PjhJ5G0lBRvu0e3EWGxGy5cI4LAGA7gV2UHCtWBI4EMXK8c9nQKA==", + "dev": true, + "requires": { + "@jest/environment": "^29.5.0", + "@jest/expect": "^29.5.0", + "@jest/test-result": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^0.7.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.5.0", + "jest-matcher-utils": "^29.5.0", + "jest-message-util": "^29.5.0", + "jest-runtime": "^29.5.0", + "jest-snapshot": "^29.5.0", + "jest-util": "^29.5.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.5.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + } + }, + "jest-cli": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.5.0.tgz", + "integrity": "sha512-L1KcP1l4HtfwdxXNFCL5bmUbLQiKrakMUriBEcc1Vfz6gx31ORKdreuWvmQVBit+1ss9NNR3yxjwfwzZNdQXJw==", + "dev": true, + "requires": { + "@jest/core": "^29.5.0", + "@jest/test-result": "^29.5.0", + "@jest/types": "^29.5.0", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "import-local": "^3.0.2", + "jest-config": "^29.5.0", + "jest-util": "^29.5.0", + "jest-validate": "^29.5.0", + "prompts": "^2.0.1", + "yargs": "^17.3.1" + } + }, + "jest-config": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.5.0.tgz", + "integrity": 
"sha512-kvDUKBnNJPNBmFFOhDbm59iu1Fii1Q6SxyhXfvylq3UTHbg6o7j/g8k2dZyXWLvfdKB1vAPxNZnMgtKJcmu3kA==", + "dev": true, + "requires": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.5.0", + "@jest/types": "^29.5.0", + "babel-jest": "^29.5.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.5.0", + "jest-environment-node": "^29.5.0", + "jest-get-type": "^29.4.3", + "jest-regex-util": "^29.4.3", + "jest-resolve": "^29.5.0", + "jest-runner": "^29.5.0", + "jest-util": "^29.5.0", + "jest-validate": "^29.5.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.5.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + } + }, + "jest-diff": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.5.0.tgz", + "integrity": "sha512-LtxijLLZBduXnHSniy0WMdaHjmQnt3g5sa16W4p0HqukYTTsyTW3GD1q41TyGl5YFXj/5B2U6dlh5FM1LIMgxw==", + "dev": true, + "requires": { + "chalk": "^4.0.0", + "diff-sequences": "^29.4.3", + "jest-get-type": "^29.4.3", + "pretty-format": "^29.5.0" + } + }, + "jest-docblock": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.4.3.tgz", + "integrity": "sha512-fzdTftThczeSD9nZ3fzA/4KkHtnmllawWrXO69vtI+L9WjEIuXWs4AmyME7lN5hU7dB0sHhuPfcKofRsUb/2Fg==", + "dev": true, + "requires": { + "detect-newline": "^3.0.0" + } + }, + "jest-each": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.5.0.tgz", + "integrity": "sha512-HM5kIJ1BTnVt+DQZ2ALp3rzXEl+g726csObrW/jpEGl+CDSSQpOJJX2KE/vEg8cxcMXdyEPu6U4QX5eruQv5hA==", + "dev": true, + "requires": { + "@jest/types": "^29.5.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.4.3", + "jest-util": "^29.5.0", + "pretty-format": "^29.5.0" + } + }, + "jest-environment-node": { + "version": "29.5.0", + "resolved": 
"https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.5.0.tgz", + "integrity": "sha512-ExxuIK/+yQ+6PRGaHkKewYtg6hto2uGCgvKdb2nfJfKXgZ17DfXjvbZ+jA1Qt9A8EQSfPnt5FKIfnOO3u1h9qw==", + "dev": true, + "requires": { + "@jest/environment": "^29.5.0", + "@jest/fake-timers": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "jest-mock": "^29.5.0", + "jest-util": "^29.5.0" + } + }, + "jest-get-type": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.4.3.tgz", + "integrity": "sha512-J5Xez4nRRMjk8emnTpWrlkyb9pfRQQanDrvWHhsR1+VUfbwxi30eVcZFlcdGInRibU4G5LwHXpI7IRHU0CY+gg==", + "dev": true + }, + "jest-haste-map": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.5.0.tgz", + "integrity": "sha512-IspOPnnBro8YfVYSw6yDRKh/TiCdRngjxeacCps1cQ9cgVN6+10JUcuJ1EabrgYLOATsIAigxA0rLR9x/YlrSA==", + "dev": true, + "requires": { + "@jest/types": "^29.5.0", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "fsevents": "^2.3.2", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.4.3", + "jest-util": "^29.5.0", + "jest-worker": "^29.5.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + } + }, + "jest-leak-detector": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.5.0.tgz", + "integrity": "sha512-u9YdeeVnghBUtpN5mVxjID7KbkKE1QU4f6uUwuxiY0vYRi9BUCLKlPEZfDGR67ofdFmDz9oPAy2G92Ujrntmow==", + "dev": true, + "requires": { + "jest-get-type": "^29.4.3", + "pretty-format": "^29.5.0" + } + }, + "jest-matcher-utils": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.5.0.tgz", + "integrity": "sha512-lecRtgm/rjIK0CQ7LPQwzCs2VwW6WAahA55YBuI+xqmhm7LAaxokSB8C97yJeYyT+HvQkH741StzpU41wohhWw==", + "dev": true, + "requires": { + "chalk": "^4.0.0", + "jest-diff": "^29.5.0", + "jest-get-type": "^29.4.3", 
+ "pretty-format": "^29.5.0" + } + }, + "jest-message-util": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.5.0.tgz", + "integrity": "sha512-Kijeg9Dag6CKtIDA7O21zNTACqD5MD/8HfIV8pdD94vFyFuer52SigdC3IQMhab3vACxXMiFk+yMHNdbqtyTGA==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.5.0", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.5.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + } + }, + "jest-mock": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.5.0.tgz", + "integrity": "sha512-GqOzvdWDE4fAV2bWQLQCkujxYWL7RxjCnj71b5VhDAGOevB3qj3Ovg26A5NI84ZpODxyzaozXLOh2NCgkbvyaw==", + "dev": true, + "requires": { + "@jest/types": "^29.5.0", + "@types/node": "*", + "jest-util": "^29.5.0" + } + }, + "jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "requires": {} + }, + "jest-regex-util": { + "version": "29.4.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.4.3.tgz", + "integrity": "sha512-O4FglZaMmWXbGHSQInfXewIsd1LMn9p3ZXB/6r4FOkyhX2/iP/soMG98jGvk/A3HAN78+5VWcBGO0BJAPRh4kg==", + "dev": true + }, + "jest-resolve": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.5.0.tgz", + "integrity": "sha512-1TzxJ37FQq7J10jPtQjcc+MkCkE3GBpBecsSUWJ0qZNJpmg6m0D9/7II03yJulm3H/fvVjgqLh/k2eYg+ui52w==", + "dev": true, + "requires": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.5.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.5.0", + "jest-validate": "^29.5.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" 
+ } + }, + "jest-resolve-dependencies": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.5.0.tgz", + "integrity": "sha512-sjV3GFr0hDJMBpYeUuGduP+YeCRbd7S/ck6IvL3kQ9cpySYKqcqhdLLC2rFwrcL7tz5vYibomBrsFYWkIGGjOg==", + "dev": true, + "requires": { + "jest-regex-util": "^29.4.3", + "jest-snapshot": "^29.5.0" + } + }, + "jest-runner": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.5.0.tgz", + "integrity": "sha512-m7b6ypERhFghJsslMLhydaXBiLf7+jXy8FwGRHO3BGV1mcQpPbwiqiKUR2zU2NJuNeMenJmlFZCsIqzJCTeGLQ==", + "dev": true, + "requires": { + "@jest/console": "^29.5.0", + "@jest/environment": "^29.5.0", + "@jest/test-result": "^29.5.0", + "@jest/transform": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.4.3", + "jest-environment-node": "^29.5.0", + "jest-haste-map": "^29.5.0", + "jest-leak-detector": "^29.5.0", + "jest-message-util": "^29.5.0", + "jest-resolve": "^29.5.0", + "jest-runtime": "^29.5.0", + "jest-util": "^29.5.0", + "jest-watcher": "^29.5.0", + "jest-worker": "^29.5.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + } + }, + "jest-runtime": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.5.0.tgz", + "integrity": "sha512-1Hr6Hh7bAgXQP+pln3homOiEZtCDZFqwmle7Ew2j8OlbkIu6uE3Y/etJQG8MLQs3Zy90xrp2C0BRrtPHG4zryw==", + "dev": true, + "requires": { + "@jest/environment": "^29.5.0", + "@jest/fake-timers": "^29.5.0", + "@jest/globals": "^29.5.0", + "@jest/source-map": "^29.4.3", + "@jest/test-result": "^29.5.0", + "@jest/transform": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.5.0", + "jest-message-util": 
"^29.5.0", + "jest-mock": "^29.5.0", + "jest-regex-util": "^29.4.3", + "jest-resolve": "^29.5.0", + "jest-snapshot": "^29.5.0", + "jest-util": "^29.5.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + } + }, + "jest-snapshot": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.5.0.tgz", + "integrity": "sha512-x7Wolra5V0tt3wRs3/ts3S6ciSQVypgGQlJpz2rsdQYoUKxMxPNaoHMGJN6qAuPJqS+2iQ1ZUn5kl7HCyls84g==", + "dev": true, + "requires": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/traverse": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.5.0", + "@jest/transform": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/babel__traverse": "^7.0.6", + "@types/prettier": "^2.1.5", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.5.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.5.0", + "jest-get-type": "^29.4.3", + "jest-matcher-utils": "^29.5.0", + "jest-message-util": "^29.5.0", + "jest-util": "^29.5.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.5.0", + "semver": "^7.3.5" + } + }, + "jest-util": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.5.0.tgz", + "integrity": "sha512-RYMgG/MTadOr5t8KdhejfvUU82MxsCu5MF6KuDUHl+NuwzUt+Sm6jJWxTJVrDR1j5M/gJVCPKQEpWXY+yIQ6lQ==", + "dev": true, + "requires": { + "@jest/types": "^29.5.0", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + } + }, + "jest-validate": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.5.0.tgz", + "integrity": "sha512-pC26etNIi+y3HV8A+tUGr/lph9B18GnzSRAkPaaZJIE1eFdiYm6/CewuiJQ8/RlfHd1u/8Ioi8/sJ+CmbA+zAQ==", + "dev": true, + "requires": { + "@jest/types": "^29.5.0", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": 
"^29.4.3", + "leven": "^3.1.0", + "pretty-format": "^29.5.0" + }, + "dependencies": { + "camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true + } + } + }, + "jest-watcher": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.5.0.tgz", + "integrity": "sha512-KmTojKcapuqYrKDpRwfqcQ3zjMlwu27SYext9pt4GlF5FUgB+7XE1mcCnSm6a4uUpFyQIkb6ZhzZvHl+jiBCiA==", + "dev": true, + "requires": { + "@jest/test-result": "^29.5.0", + "@jest/types": "^29.5.0", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.5.0", + "string-length": "^4.0.1" + } + }, + "jest-worker": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.5.0.tgz", + "integrity": "sha512-NcrQnevGoSp4b5kg+akIpthoAFHxPBcb5P6mYPY0fUNT+sSvmtu6jlkEle3anczUKIKEbMxFimk9oTP/tpIPgA==", + "dev": true, + "requires": { + "@types/node": "*", + "jest-util": "^29.5.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "dependencies": { + "supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } }, "js-sdsl": { "version": "4.4.0", @@ -2502,6 +7840,12 @@ "integrity": "sha512-FfVSdx6pJ41Oa+CF7RDaFmTnCaFhua+SNYQX74riGOpl96x+2jQCqEfQ2bnXu/5DPCqlRuiqyvTJM0Qjz26IVg==", "dev": true }, + "js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, "js-yaml": { "version": 
"4.1.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", @@ -2511,6 +7855,18 @@ "argparse": "^2.0.1" } }, + "jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true + }, + "json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, "json-schema-traverse": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", @@ -2523,6 +7879,34 @@ "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", "dev": true }, + "json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true + }, + "jsonpath": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/jsonpath/-/jsonpath-1.1.1.tgz", + "integrity": "sha512-l6Cg7jRpixfbgoWgkrl77dgEj8RPvND0wMH6TwQmi9Qs4TFfS9u5cUFnbeKTwj5ga5Y3BTGGNI28k117LJ009w==", + "requires": { + "esprima": "1.2.2", + "static-eval": "2.0.2", + "underscore": "1.12.1" + } + }, + "kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true + }, + "leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true + }, 
"levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -2533,6 +7917,12 @@ "type-check": "~0.4.0" } }, + "lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, "locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -2542,6 +7932,12 @@ "p-locate": "^5.0.0" } }, + "lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==", + "dev": true + }, "lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", @@ -2557,6 +7953,44 @@ "yallist": "^4.0.0" } }, + "make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "dev": true, + "requires": { + "semver": "^6.0.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true + }, + "makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": 
"sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "requires": { + "tmpl": "1.0.5" + } + }, + "merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, "merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", @@ -2586,6 +8020,12 @@ "mime-db": "1.52.0" } }, + "mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true + }, "minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", @@ -2613,6 +8053,33 @@ "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==", "dev": true }, + "node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true + }, + "node-releases": { + "version": "2.0.12", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.12.tgz", + "integrity": "sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==", + "dev": true + }, + "normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true + }, + "npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": 
"sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "requires": { + "path-key": "^3.0.0" + } + }, "once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -2622,6 +8089,15 @@ "wrappy": "1" } }, + "onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "requires": { + "mimic-fn": "^2.1.0" + } + }, "optionator": { "version": "0.9.1", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", @@ -2654,6 +8130,12 @@ "p-limit": "^3.0.2" } }, + "p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true + }, "parent-module": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -2663,6 +8145,18 @@ "callsites": "^3.0.0" } }, + "parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + } + }, "path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", @@ -2681,24 +8175,119 @@ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", "dev": true }, + "path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": 
"sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true + }, "path-type": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", "dev": true }, + "picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "dev": true + }, "picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", "dev": true }, + "pirates": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.5.tgz", + "integrity": "sha512-8V9+HQPupnaXMA23c5hvl69zXvTwTzyAYasnkb0Tts4XvO4CliqONMOnvlq26rkhLC3nWDFBJf73LU1e1VZLaQ==", + "dev": true + }, + "pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "requires": { + "find-up": "^4.0.0" + }, + "dependencies": { + "find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "requires": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + } + }, + "locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "requires": { + "p-locate": "^4.1.0" + } + }, + 
"p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "requires": { + "p-try": "^2.0.0" + } + }, + "p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "requires": { + "p-limit": "^2.2.0" + } + } + } + }, "prelude-ls": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", "dev": true }, + "pretty-format": { + "version": "29.5.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.5.0.tgz", + "integrity": "sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==", + "dev": true, + "requires": { + "@jest/schemas": "^29.4.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true + } + } + }, + "prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "requires": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + } + }, "proxy-from-env": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", @@ -2710,23 +8299,75 @@ "integrity": 
"sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", "dev": true }, + "pure-rand": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.2.tgz", + "integrity": "sha512-6Yg0ekpKICSjPswYOuC5sku/TSWaRYlA0qsXqJgM/d/4pLPHPuTxK7Nbf7jFKzAeedUhR8C7K9Uv63FBsSo8xQ==", + "dev": true + }, "queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", "dev": true }, + "react-is": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", + "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==", + "dev": true + }, "reflect-metadata": { "version": "0.1.13", "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.1.13.tgz", "integrity": "sha512-Ts1Y/anZELhSsjMcU605fU9RE4Oi3p5ORujwbIKXfWa+0Zxs510Qrmrce5/Jowq3cHSZSJqBjypxmHarc+vEWg==" }, + "require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true + }, + "resolve": { + "version": "1.22.2", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz", + "integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==", + "dev": true, + "requires": { + "is-core-module": "^2.11.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + } + }, + "resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, 
+ "requires": { + "resolve-from": "^5.0.0" + }, + "dependencies": { + "resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true + } + } + }, "resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true }, + "resolve.exports": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz", + "integrity": "sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==", + "dev": true + }, "reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", @@ -2775,12 +8416,92 @@ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true }, + "signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true + }, "slash": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", "dev": true }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": 
"sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "devOptional": true + }, + "source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "requires": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true + }, + "stack-utils": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "requires": { + "escape-string-regexp": "^2.0.0" + }, + "dependencies": { + "escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true + } + } + }, + "static-eval": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/static-eval/-/static-eval-2.0.2.tgz", + "integrity": "sha512-N/D219Hcr2bPjLxPiV+TQE++Tsmrady7TqAJugLy7Xk1EumfDWS/f5dtBbkRCGE7wKKXuYockQoj8Rm2/pVKyg==", + "requires": { + "escodegen": "^1.8.1" + } + }, + "string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "requires": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + } + }, + "string-width": { + "version": "4.2.3", 
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + } + }, "strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -2790,6 +8511,18 @@ "ansi-regex": "^5.0.1" } }, + "strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true + }, + "strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true + }, "strip-json-comments": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", @@ -2805,12 +8538,41 @@ "has-flag": "^4.0.0" } }, + "supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true + }, + "test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "requires": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + } + }, "text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", "integrity": 
"sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", "dev": true }, + "tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true + }, + "to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "dev": true + }, "to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -2820,6 +8582,43 @@ "is-number": "^7.0.0" } }, + "ts-jest": { + "version": "29.1.0", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.0.tgz", + "integrity": "sha512-ZhNr7Z4PcYa+JjMl62ir+zPiNJfXJN6E8hSLnaUKhOgqcn8vb3e537cpkd0FuAfRK3sR1LSqM1MOhliXNgOFPA==", + "dev": true, + "requires": { + "bs-logger": "0.x", + "fast-json-stable-stringify": "2.x", + "jest-util": "^29.0.0", + "json5": "^2.2.3", + "lodash.memoize": "4.x", + "make-error": "1.x", + "semver": "7.x", + "yargs-parser": "^21.0.1" + } + }, + "ts-node": { + "version": "10.9.1", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", + "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==", + "dev": true, + "requires": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + } + }, "tslib": { "version": "1.14.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", @@ 
-2844,6 +8643,12 @@ "prelude-ls": "^1.2.1" } }, + "type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true + }, "type-fest": { "version": "0.20.2", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", @@ -2856,6 +8661,21 @@ "integrity": "sha512-CIfGzTelbKNEnLpLdGFgdyKhG23CKdKgQPOBc+OUNrkJ2vr+KSzsSV5kq5iWhEQbok+quxgGzrAtGWCyU7tHnA==", "dev": true }, + "underscore": { + "version": "1.12.1", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.12.1.tgz", + "integrity": "sha512-hEQt0+ZLDVUMhebKxL4x1BTtDY7bavVofhZ9KZ4aI26X9SRaE+Y3m83XUL1UP2jn8ynjndwCCpEHdUG+9pP1Tw==" + }, + "update-browserslist-db": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", + "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", + "dev": true, + "requires": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + } + }, "uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", @@ -2865,6 +8685,40 @@ "punycode": "^2.1.0" } }, + "v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true + }, + "v8-to-istanbul": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.1.0.tgz", + "integrity": "sha512-6z3GW9x8G1gd+JIIgQQQxXuiJtCXeAjp6RaPEPLv62mH3iPHPxV6W3robxtCzNErRo6ZwTmzWhsbNvjyEBKzKA==", + "dev": true, + "requires": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^1.6.0" + }, + 
"dependencies": { + "convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", + "dev": true + } + } + }, + "walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "requires": { + "makeerror": "1.0.12" + } + }, "which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -2877,8 +8731,18 @@ "word-wrap": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", - "dev": true + "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==" + }, + "wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + } }, "wrappy": { "version": "1.0.2", @@ -2886,12 +8750,55 @@ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", "dev": true }, + "write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "requires": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + } + }, + "y18n": { + "version": "5.0.8", + "resolved": 
"https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true + }, "yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", "dev": true }, + "yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "requires": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + } + }, + "yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true + }, + "yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true + }, "yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", diff --git a/package.json b/package.json index 1ca5d7b..bf4cf8b 100755 --- a/package.json +++ b/package.json @@ -1,17 +1,19 @@ { "name": "@speakeasy-api/openai", - "version": "1.9.2", + "version": "1.10.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" }, "dependencies": { "axios": "^1.1.3", + "jsonpath": "^1.1.1", "class-transformer": "^0.5.1", "form-data": "^4.0.0", "reflect-metadata": "^0.1.13" }, "devDependencies": { + "@types/jsonpath": "^0.2.0", "@types/node": "^18.11.5", "typescript": "^4.8.4", 
"@typescript-eslint/eslint-plugin": "^5.56.0", @@ -22,5 +24,10 @@ "files": [ "dist", "README.md" - ] + ], + "repository": { + "type": "git", + "url": "https://github.com/speakeasy-sdks/openai-ts-sdk.git", + "directory": "." + } } diff --git a/src/internal/utils/headers.ts b/src/internal/utils/headers.ts index afa4ab0..d570fda 100755 --- a/src/internal/utils/headers.ts +++ b/src/internal/utils/headers.ts @@ -5,12 +5,12 @@ import { AxiosResponseHeaders, RawAxiosResponseHeaders } from "axios"; import { ParamDecorator, - convertIfDateObjectToISOString, isBooleanRecord, isEmpty, isNumberRecord, isStringRecord, parseParamDecorator, + valToString, } from "./utils"; import { requestMetadataKey } from "./requestbody"; @@ -51,8 +51,7 @@ export function getHeadersFromRequest(headerParams: any): any { const value: string = serializeHeader( headerParams[fname], - headerDecorator.Explode, - headerDecorator.DateTimeFormat + headerDecorator.Explode ); if (value != "") headers[headerDecorator.ParamName] = value; @@ -89,16 +88,12 @@ export function getHeadersFromResponse( return reponseHeaders; } -function serializeHeader( - header: any, - explode: boolean, - dateTimeFormat?: string -): string { +function serializeHeader(header: any, explode: boolean): string { const headerVals: string[] = []; if (Array.isArray(header)) { header.forEach((val: any) => { - headerVals.push(convertIfDateObjectToISOString(val, dateTimeFormat)); + headerVals.push(valToString(val)); }); } else if ( isStringRecord(header) || @@ -106,8 +101,9 @@ function serializeHeader( isBooleanRecord(header) ) { Object.getOwnPropertyNames(header).forEach((headerKey: string) => { - if (explode) headerVals.push(`${headerKey}=${header[headerKey]}`); - else headerVals.push(`${headerKey},${header[headerKey]}`); + if (explode) + headerVals.push(`${headerKey}=${valToString(header[headerKey])}`); + else headerVals.push(`${headerKey},${valToString(header[headerKey])}`); }); } else if (header instanceof Object) { 
Object.getOwnPropertyNames(header).forEach((headerKey: string) => { @@ -128,10 +124,7 @@ function serializeHeader( if (headerDecorator == null) return; - const headerFieldValue = convertIfDateObjectToISOString( - header[headerKey], - headerDecorator.DateTimeFormat - ); + const headerFieldValue = valToString(header[headerKey]); if (isEmpty(headerFieldValue)) return; else if (explode) diff --git a/src/internal/utils/pathparams.ts b/src/internal/utils/pathparams.ts index caa8906..cdc9d8c 100755 --- a/src/internal/utils/pathparams.ts +++ b/src/internal/utils/pathparams.ts @@ -3,15 +3,13 @@ */ import { - encodeAndConvertPrimitiveVal, ParamDecorator, - parseParamDecorator, -} from "./utils"; -import { - isStringRecord, - isNumberRecord, isBooleanRecord, isEmpty, + isNumberRecord, + isStringRecord, + parseParamDecorator, + valToString, } from "./utils"; export const ppMetadataKey = "pathParam"; @@ -19,15 +17,14 @@ export const ppMetadataKey = "pathParam"; export function getSimplePathParams( paramName: string, paramValue: any, - explode: boolean, - dateTimeFormat?: string + explode: boolean ): Map { const pathParams: Map = new Map(); const ppVals: string[] = []; if (Array.isArray(paramValue)) { paramValue.forEach((param) => { - ppVals.push(encodeAndConvertPrimitiveVal(param, dateTimeFormat)); + ppVals.push(encodeURIComponent(valToString(param))); }); pathParams.set(paramName, ppVals.join(",")); } else if ( @@ -36,9 +33,8 @@ export function getSimplePathParams( isBooleanRecord(paramValue) ) { Object.getOwnPropertyNames(paramValue).forEach((paramKey: string) => { - const paramFieldValue = encodeAndConvertPrimitiveVal( - paramValue[paramKey], - dateTimeFormat + const paramFieldValue = encodeURIComponent( + valToString(paramValue[paramKey]) ); if (explode) ppVals.push(`${paramKey}=${paramFieldValue}`); @@ -65,9 +61,8 @@ export function getSimplePathParams( if (ppDecorator == null) return; - const paramFieldValue = encodeAndConvertPrimitiveVal( - paramValue[paramKey], - 
ppDecorator.DateTimeFormat + const paramFieldValue = encodeURIComponent( + valToString(paramValue[paramKey]) ); if (isEmpty(paramFieldValue)) return; @@ -78,10 +73,7 @@ export function getSimplePathParams( pathParams.set(paramName, ppVals.join(",")); } else { - pathParams.set( - paramName, - encodeAndConvertPrimitiveVal(paramValue, dateTimeFormat) - ); + pathParams.set(paramName, encodeURIComponent(valToString(paramValue))); } return pathParams; } diff --git a/src/internal/utils/queryparams.ts b/src/internal/utils/queryparams.ts index 481cc66..cd39120 100755 --- a/src/internal/utils/queryparams.ts +++ b/src/internal/utils/queryparams.ts @@ -2,15 +2,9 @@ * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. */ -import { - convertIfDateObjectToISOString, - encodeAndConvertPrimitiveVal, - parseParamDecorator, - populateFromGlobals, -} from "./utils"; +import {ParamDecorator, parseParamDecorator, populateFromGlobals, shouldQueryParamSerialize, valToString} from "./utils"; -import { ParamDecorator } from "./utils"; -import { requestMetadataKey } from "./requestbody"; +import {requestMetadataKey} from "./requestbody"; export const qpMetadataKey = "queryParam"; const queryStringPrefix = "?"; @@ -39,7 +33,7 @@ export function serializeQueryParams(queryParams: any, globals?: any): string { fname ); - if (!qpAnn) return { serialize: () => "" }; + if (!qpAnn) return {serialize: () => ""}; const qpDecorator: ParamDecorator = parseParamDecorator( qpAnn, @@ -54,39 +48,38 @@ export function serializeQueryParams(queryParams: any, globals?: any): string { value = populateFromGlobals(value, fname, "queryParam", globals); if (qpDecorator.Serialization === "json") - queryStringParts.push(jsonSerializer({ [qpDecorator.ParamName]: value })); + queryStringParts.push(jsonSerializer({[qpDecorator.ParamName]: value})); else { switch (qpDecorator.Style) { case "deepObject": queryStringParts.push( - deepObjectSerializer( - { [qpDecorator.ParamName]: value }, - 
qpDecorator.DateTimeFormat - ) + deepObjectSerializer({[qpDecorator.ParamName]: value}) ); return; case "form": if (!qpDecorator.Explode) queryStringParts.push( - formSerializer( - { [qpDecorator.ParamName]: value }, - qpDecorator.DateTimeFormat - ) + noExplodeSerializer({[qpDecorator.ParamName]: value}) ); else queryStringParts.push( - formSerializerExplode( - { [qpDecorator.ParamName]: value }, - qpDecorator.DateTimeFormat - ) + formSerializerExplode({[qpDecorator.ParamName]: value}) ); return; + case "pipeDelimited": + if (!qpDecorator.Explode) { + queryStringParts.push( + noExplodeSerializer({[qpDecorator.ParamName]: value}, "|") + ); + } else { + queryStringParts.push( + formSerializerExplode({[qpDecorator.ParamName]: value}) + ); + } + return; default: queryStringParts.push( - formSerializerExplode( - { [qpDecorator.ParamName]: value }, - qpDecorator.DateTimeFormat - ) + formSerializerExplode({[qpDecorator.ParamName]: value}) ); } } @@ -99,53 +92,21 @@ function jsonSerializer(params: Record): string { const query: string[] = []; Object.entries(Object.assign({}, params)).forEach(([key, value]) => { - const values: string = Object.getOwnPropertyNames(value) - .map((paramKey: string) => { - const qpAnn: string = Reflect.getMetadata( - qpMetadataKey, - value, - paramKey - ); - - const qpDecorator: ParamDecorator = parseParamDecorator( - qpAnn, - paramKey, - "form", - true - ); - - if (qpDecorator == null) return; - - return `"${paramKey}":${JSON.stringify( - convertIfDateObjectToISOString( - value[paramKey], - qpDecorator.DateTimeFormat - ) - )}`; - }) - .join(","); - query.push(`${key}={${encodeURIComponent(values)}}`); + query.push(`${key}=${encodeURIComponent(JSON.stringify(value))}`); }); return query.join("&"); } // TODO: Add support for disabling percent encoding for reserved characters -function formSerializer( - params: Record, - dateTimeFormat?: string -): string { +function noExplodeSerializer(params: Record, delimiter = ","): string { const query: 
string[] = []; Object.entries(Object.assign({}, params)).forEach(([key, value]) => { - if (!value) return; + if (!shouldQueryParamSerialize(value)) return; if (value !== Object(value)) - query.push( - `${key}=${encodeAndConvertPrimitiveVal(value, dateTimeFormat)}` - ); + query.push(`${key}=${encodeURIComponent(valToString(value))}`); else if (Array.isArray(value)) { - const values: string = value - .map((aValue) => convertIfDateObjectToISOString(aValue, dateTimeFormat)) - .join(","); + const values: string = value.map((aValue) => aValue).join(delimiter); query.push(`${key}=${encodeURIComponent(values)}`); } else { @@ -166,12 +127,9 @@ function formSerializer( if (qpDecorator == null) return; - return `${paramKey},${convertIfDateObjectToISOString( - value[paramKey], - qpDecorator.DateTimeFormat - )}`; + return `${paramKey}${delimiter}${valToString(value[paramKey])}`; }) - .join(","); + .join(delimiter); query.push(`${key}=${encodeURIComponent(values)}`); } }); @@ -179,25 +137,17 @@ function formSerializer( } // TODO: Add support for disabling percent encoding for reserved characters -function formSerializerExplode( - params: Record, - dateTimeFormat?: string -): string { +function formSerializerExplode(params: Record): string { const query: string[] = []; Object.entries(Object.assign({}, params)).forEach(([key, value]) => { - if (!value) return; + if (!shouldQueryParamSerialize(value)) return; if (value !== Object(value)) - query.push( - `${key}=${encodeAndConvertPrimitiveVal(value, dateTimeFormat)}` - ); + query.push(`${key}=${encodeURIComponent(value)}`); else if (Array.isArray(value)) { query.push( value - .map( - (aValue) => - `${key}=${encodeAndConvertPrimitiveVal(aValue, dateTimeFormat)}` - ) + .map((aValue) => `${key}=${encodeURIComponent(valToString(aValue))}`) .join("&") ); } else @@ -219,9 +169,8 @@ function formSerializerExplode( if (qpDecorator == null) return; - return `${paramKey}=${encodeAndConvertPrimitiveVal( - value[paramKey], - 
qpDecorator.DateTimeFormat + return `${paramKey}=${encodeURIComponent( + valToString(value[paramKey]) )}`; }) .join("&") @@ -231,27 +180,19 @@ function formSerializerExplode( } // TODO: Add support for disabling percent encoding for reserved characters -function deepObjectSerializer( - params: Record, - dateTimeFormat?: string -): string { +function deepObjectSerializer(params: Record): string { const query: string[] = []; Object.entries(Object.assign({}, params)).forEach(([key, value]) => { - if (!value) return; + if (!shouldQueryParamSerialize(value)) return; if (value !== Object(value)) - query.push( - `${key}=${encodeAndConvertPrimitiveVal(value, dateTimeFormat)}` - ); + query.push(`${key}=${encodeURIComponent(value)}`); else if (Array.isArray(value)) { query.push( value .map( ([objKey, objValue]) => - `${key}[${objKey}]=${encodeAndConvertPrimitiveVal( - objValue, - dateTimeFormat - )}` + `${key}[${objKey}]=${encodeURIComponent(valToString(objValue))}` ) .join("&") ); @@ -279,15 +220,13 @@ function deepObjectSerializer( return value[paramKey] .map( (arrValue: any) => - `${key}[${paramKey}]=${encodeAndConvertPrimitiveVal( - arrValue, - qpDecorator.DateTimeFormat + `${key}[${paramKey}]=${encodeURIComponent( + valToString(arrValue) )}` ) .join("&"); - return `${key}[${paramKey}]=${encodeAndConvertPrimitiveVal( - value[paramKey], - qpDecorator.DateTimeFormat + return `${key}[${paramKey}]=${encodeURIComponent( + valToString(value[paramKey]) )}`; }) .join("&") diff --git a/src/internal/utils/requestbody.ts b/src/internal/utils/requestbody.ts index 686ee85..40f601b 100755 --- a/src/internal/utils/requestbody.ts +++ b/src/internal/utils/requestbody.ts @@ -2,15 +2,10 @@ * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
*/ -import { - SerializationMethodToContentType, - convertIfDateObjectToISOString, - isBooleanRecord, - isNumberRecord, - isStringRecord, -} from "./utils"; +import {isBooleanRecord, isNumberRecord, isStringRecord, SerializationMethodToContentType, valToString,} from "./utils"; import FormData from "form-data"; +import {RFCDate} from "../../sdk/types"; export const requestMetadataKey = "request"; const mpFormMetadataKey = "multipart_form"; @@ -55,6 +50,9 @@ const serializeContentType = ( switch (contentType) { case "multipart/form-data": + requestBody = encodeMultipartFormData(reqBody); + break; + case "multipart/mixed": requestBody = encodeMultipartFormData(reqBody); requestHeaders = (requestBody as FormData).getHeaders(); @@ -62,7 +60,7 @@ const serializeContentType = ( case "application/x-www-form-urlencoded": [requestHeaders, requestBody] = [ - { "Content-Type": `${contentType}` }, + {"Content-Type": `${contentType}`}, encodeFormUrlEncodeData(reqBody), ]; break; @@ -70,7 +68,7 @@ const serializeContentType = ( case "application/json": case "text/json": [requestHeaders, requestBody] = [ - { "Content-Type": `${contentType}` }, + {"Content-Type": `${contentType}`}, reqBody, ]; break; @@ -83,7 +81,7 @@ const serializeContentType = ( requestBody instanceof String || requestBody instanceof Uint8Array ) - requestHeaders = { "Content-Type": `${contentType}` }; + requestHeaders = {"Content-Type": `${contentType}`}; else throw new Error( `invalid request body type ${requestBodyType} for mediaType ${contentType}` @@ -120,17 +118,9 @@ const encodeFormUrlEncodeData = (data: any): FormData => { if (formDecorator.Style === "form") { let parsed: Record; if (formDecorator.Explode === true) { - parsed = formExplode( - fname, - data[fname], - formDecorator.DateTimeFormat - ); + parsed = formExplode(fname, data[fname]); } else { - parsed = formNotExplode( - fname, - data[fname], - formDecorator.DateTimeFormat - ); + parsed = formNotExplode(fname, data[fname]); } 
Object.keys(parsed).forEach((key) => { @@ -143,11 +133,7 @@ const encodeFormUrlEncodeData = (data: any): FormData => { return formData; }; -const formExplode = ( - fname: string, - data: any, - dateTimeFormat?: string -): Record => { +const formExplode = (fname: string, data: any): Record => { const exploded: Record = {}; if (Array.isArray(data)) { @@ -157,38 +143,30 @@ const formExplode = ( } exploded[fname].push(value); }); - } else { - if (typeof data === "object") { - if (data instanceof Date) { - if (!exploded[fname]) { - exploded[fname] = []; - } - exploded[fname].push( - convertIfDateObjectToISOString(data, dateTimeFormat) - ); - return exploded; + } else if (typeof data === "object") { + if (data instanceof Date || data instanceof RFCDate) { + if (!exploded[fname]) { + exploded[fname] = []; } + exploded[fname].push(valToString(data)); + } else { Object.keys(data).forEach((key) => { if (!exploded[key]) { exploded[key] = []; } exploded[key].push(data[key]); }); - } else { - if (!exploded[fname]) { - exploded[fname] = []; - } - exploded[fname].push(data.toString()); } + } else { + if (!exploded[fname]) { + exploded[fname] = []; + } + exploded[fname].push(valToString(data)); } return exploded; }; -const formNotExplode = ( - fname: string, - data: any, - dateTimeFormat?: string -): Record => { +const formNotExplode = (fname: string, data: any): Record => { const notExploded: Record = {}; if (Array.isArray(data)) { @@ -196,29 +174,25 @@ const formNotExplode = ( notExploded[fname] = []; } notExploded[fname].push(data.map((item) => item.toString()).join(",")); - } else { - if (typeof data === "object") { - if (data instanceof Date) { - if (!notExploded[fname]) { - notExploded[fname] = []; - } - notExploded[fname].push( - convertIfDateObjectToISOString(data, dateTimeFormat) - ); - return notExploded; + } else if (typeof data === "object") { + if (data instanceof Date || data instanceof RFCDate) { + if (!notExploded[fname]) { + notExploded[fname] = []; } + 
notExploded[fname].push(valToString(data)); + } else { Object.keys(data).forEach((key) => { if (!notExploded[key]) { notExploded[key] = []; } notExploded[fname].push(`${key}=${data[key]}`); }); - } else { - if (!notExploded[fname]) { - notExploded[fname] = []; - } - notExploded[fname].push(data.toString()); } + } else { + if (!notExploded[fname]) { + notExploded[fname] = []; + } + notExploded[fname].push(valToString(data)); } return notExploded; }; @@ -245,8 +219,6 @@ function parseFormDecorator(formAnn: string): FormDecorator { case "json": formDecorator.JSON = formVal === "true"; break; - case "dateTimeFormat": - formDecorator.DateTimeFormat = formVal; } }); @@ -258,7 +230,6 @@ class FormDecorator { Style?: string; Explode?: boolean; JSON?: boolean; - DateTimeFormat?: string; constructor( Name?: string, @@ -292,38 +263,14 @@ function encodeMultipartFormData(form: any): FormData { if (mpFormDecorator.File) return encodeMultipartFormDataFile(formData, form[fname]); else if (mpFormDecorator.JSON) { - formData.append( - mpFormDecorator.Name, - JSON.stringify(form[fname], (key, value) => { - return convertIfDateObjectToISOString( - value, - mpFormDecorator.DateTimeFormat - ); - }) - ); + formData.append(mpFormDecorator.Name, JSON.stringify(form[fname])); } else { if (Array.isArray(form[fname])) { form[fname].forEach((val: any) => { - formData.append( - mpFormDecorator.Name + "[]", - String( - convertIfDateObjectToISOString( - val, - mpFormDecorator.DateTimeFormat - ) - ) - ); + formData.append(mpFormDecorator.Name + "[]", valToString(val)); }); } else { - formData.append( - mpFormDecorator.Name, - String( - convertIfDateObjectToISOString( - form[fname], - mpFormDecorator.DateTimeFormat - ) - ) - ); + formData.append(mpFormDecorator.Name, valToString(form[fname])); } } }); @@ -391,8 +338,6 @@ function parseMultipartFormDecorator( case "json": mpFormDecorator.JSON = mpFormVal == "true"; break; - case "dateTimeFormat": - mpFormDecorator.DateTimeFormat = mpFormVal; } }); 
@@ -404,7 +349,6 @@ class MultipartFormDecorator { Content: boolean; JSON: boolean; Name: string; - DateTimeFormat?: string; constructor(File: boolean, Content: boolean, JSON: boolean, Name: string) { this.File = File; diff --git a/src/internal/utils/security.ts b/src/internal/utils/security.ts index b109f9d..d8939d6 100755 --- a/src/internal/utils/security.ts +++ b/src/internal/utils/security.ts @@ -185,7 +185,7 @@ function parseSecuritySchemeValue( case "basic": break; case "bearer": - client.defaults.headers.common[securityDecorator.Name] = value; + client.defaults.headers.common[securityDecorator.Name] = value.toLowerCase().startsWith("bearer ") ? value : `Bearer ${value}`; break; default: throw new Error("not supported"); diff --git a/src/internal/utils/utils.ts b/src/internal/utils/utils.ts index 580c968..4388e25 100755 --- a/src/internal/utils/utils.ts +++ b/src/internal/utils/utils.ts @@ -4,10 +4,11 @@ import "reflect-metadata"; -import { getSimplePathParams, ppMetadataKey } from "./pathparams"; +import {getSimplePathParams, ppMetadataKey} from "./pathparams"; -import { plainToInstance } from "class-transformer"; -import { requestMetadataKey } from "./requestbody"; +import {plainToInstance} from "class-transformer"; +import {RFCDate} from "../../sdk/types"; +import {requestMetadataKey} from "./requestbody"; export const SerializationMethodToContentType: Record = { json: "application/json", @@ -25,7 +26,7 @@ export interface PropInfo { } function isSpeakeasyBase(type: any): boolean { - return type && Object.getPrototypeOf(type)?.name == "SpeakeasyBase"; + return type && Object.getPrototypeOf(type)?.name == SpeakeasyBase.name; } function handleArray(value: any, elemType: any, elemDepth: number): any { @@ -100,6 +101,12 @@ export class SpeakeasyBase { prop.elemType, prop.elemDepth ); + } else if (prop.type.name == "RFCDate") { + if (value instanceof Date) { + (this as any)[prop.key] = new RFCDate(value); + } else { + (this as any)[prop.key] = value; + } } else 
{ (this as any)[prop.key] = value; } @@ -114,19 +121,16 @@ export class ParamDecorator { Explode: boolean; ParamName: string; Serialization?: string; - DateTimeFormat?: string; constructor( Style: string, Explode: boolean, ParamName: string, - Serialization?: string, - DateTimeFormat?: string + Serialization?: string ) { this.Style = Style; this.Explode = Explode; this.ParamName = ParamName; this.Serialization = Serialization; - this.DateTimeFormat = DateTimeFormat; } } @@ -216,17 +220,26 @@ export function generateURL( let value = pathParams[fname]; value = populateFromGlobals(value, fname, "pathParam", globals); - switch (ppDecorator.Style) { - case "simple": { - const simpleParams: Map = getSimplePathParams( - ppDecorator.ParamName, - value, - ppDecorator.Explode, - ppDecorator.DateTimeFormat - ); - simpleParams.forEach((value, key) => { - parsedParameters[key] = value; - }); + if (ppDecorator.Serialization) { + switch (ppDecorator.Serialization) { + case "json": + parsedParameters[ppDecorator.ParamName] = encodeURIComponent( + JSON.stringify(value) + ); + break; + } + } else { + switch (ppDecorator.Style) { + case "simple": { + const simpleParams: Map = getSimplePathParams( + ppDecorator.ParamName, + value, + ppDecorator.Explode + ); + simpleParams.forEach((value, key) => { + parsedParameters[key] = value; + }); + } } } }); @@ -262,8 +275,6 @@ export function parseParamDecorator( case "serialization": decorator.Serialization = paramVal; break; - case "dateTimeFormat": - decorator.DateTimeFormat = paramVal; } }); return decorator; @@ -307,45 +318,7 @@ export function isEmpty(value: any): boolean { return res || value == null; } -// If value is Date type, serialize as ISO string since Date constructor creates from system clock -export function convertIfDateObjectToISOString( - value: any, - dateTimeFormat?: string -): any { - const dtFormat = dateTimeFormat ?? 
"YYYY-MM-DDThh:mm:ss.sssZ"; - if (value instanceof Date) { - if (dtFormat === "YYYY-MM-DD") { - const dateRegex = /^(\d{4})-(\d{2})-(\d{2})/; - - const matches = value.toISOString().match(dateRegex); - if (matches == null) { - throw new Error("Date format is not valid"); - } - - const [, year, month, day]: RegExpMatchArray = matches; - return `${year}-${month}-${day}`; - } - if (dtFormat === "YYYY-MM-DDThh:mm:ss.sssZ") { - return value.toISOString(); - } - } - return value; -} - -export function encodeAndConvertPrimitiveVal( - value: any, - dateTimeFormat?: string -): any { - return encodeURIComponent( - convertIfDateObjectToISOString(value, dateTimeFormat) - ); -} - -export function deserializeJSONResponse( - value: T, - klass?: any, - elemDepth = 0 -): any { +export function objectToClass(value: T, klass?: any, elemDepth = 0): any { if (value !== Object(value)) { return value; } @@ -353,27 +326,25 @@ export function deserializeJSONResponse( if (elemDepth === 0 && klass != null) { return plainToInstance(klass, value, { excludeExtraneousValues: true, + exposeUnsetFields: false, }) as typeof klass; } if (Array.isArray(value)) { - return value.map((v) => deserializeJSONResponse(v, klass, elemDepth - 1)); + return value.map((v) => objectToClass(v, klass, elemDepth - 1)); } if (typeof value === "object" && value != null) { const copiedRecord: Record = {}; for (const key in value) { - copiedRecord[key] = deserializeJSONResponse( - value[key], - klass, - elemDepth - 1 - ); + copiedRecord[key] = objectToClass(value[key], klass, elemDepth - 1); } return copiedRecord; } return plainToInstance(klass, value, { excludeExtraneousValues: true, + exposeUnsetFields: false, }) as typeof klass; } @@ -417,3 +388,15 @@ export function populateFromGlobals( return value; } + +export function valToString(value: any): string { + if (value instanceof Date) { + return value.toISOString(); + } + + return value.toString(); +} + +export function shouldQueryParamSerialize(value: any): boolean { 
+ return !(value === undefined || value === null || value === "") +} diff --git a/src/sdk/models/operations/cancelfinetune.ts b/src/sdk/models/operations/cancelfinetune.ts index 582c7f6..6797488 100755 --- a/src/sdk/models/operations/cancelfinetune.ts +++ b/src/sdk/models/operations/cancelfinetune.ts @@ -3,34 +3,33 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CancelFineTuneRequest extends SpeakeasyBase { - /** - * The ID of the fine-tune job to cancel - * - * @remarks - * - */ - @SpeakeasyMetadata({ - data: "pathParam, style=simple;explode=false;name=fine_tune_id", - }) - fineTuneId: string; + /** + * The ID of the fine-tune job to cancel + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=fine_tune_id" }) + fineTuneId: string; } export class CancelFineTuneResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - fineTune?: any; + /** + * OK + */ + @SpeakeasyMetadata() + fineTune?: shared.FineTune; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createanswer.ts b/src/sdk/models/operations/createanswer.ts index 95f8036..a3d5554 100755 --- a/src/sdk/models/operations/createanswer.ts +++ b/src/sdk/models/operations/createanswer.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateAnswerResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - createAnswerResponse?: shared.CreateAnswerResponse; + /** + * OK + */ + 
@SpeakeasyMetadata() + createAnswerResponse?: shared.CreateAnswerResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createchatcompletion.ts b/src/sdk/models/operations/createchatcompletion.ts index 985031d..8b78169 100755 --- a/src/sdk/models/operations/createchatcompletion.ts +++ b/src/sdk/models/operations/createchatcompletion.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateChatCompletionResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - createChatCompletionResponse?: shared.CreateChatCompletionResponse; + /** + * OK + */ + @SpeakeasyMetadata() + createChatCompletionResponse?: shared.CreateChatCompletionResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createclassification.ts b/src/sdk/models/operations/createclassification.ts index 49472d9..97c359f 100755 --- a/src/sdk/models/operations/createclassification.ts +++ b/src/sdk/models/operations/createclassification.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateClassificationResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - createClassificationResponse?: shared.CreateClassificationResponse; + /** + * OK + */ + @SpeakeasyMetadata() + createClassificationResponse?: shared.CreateClassificationResponse; - @SpeakeasyMetadata() - statusCode: number; + 
@SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createcompletion.ts b/src/sdk/models/operations/createcompletion.ts index a2273b9..be937c7 100755 --- a/src/sdk/models/operations/createcompletion.ts +++ b/src/sdk/models/operations/createcompletion.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateCompletionResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - createCompletionResponse?: shared.CreateCompletionResponse; + /** + * OK + */ + @SpeakeasyMetadata() + createCompletionResponse?: shared.CreateCompletionResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createedit.ts b/src/sdk/models/operations/createedit.ts index aae0c17..372021d 100755 --- a/src/sdk/models/operations/createedit.ts +++ b/src/sdk/models/operations/createedit.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateEditResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - createEditResponse?: shared.CreateEditResponse; + /** + * OK + */ + @SpeakeasyMetadata() + createEditResponse?: shared.CreateEditResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createembedding.ts b/src/sdk/models/operations/createembedding.ts index 
d83215c..f8ce762 100755 --- a/src/sdk/models/operations/createembedding.ts +++ b/src/sdk/models/operations/createembedding.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateEmbeddingResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - createEmbeddingResponse?: shared.CreateEmbeddingResponse; + /** + * OK + */ + @SpeakeasyMetadata() + createEmbeddingResponse?: shared.CreateEmbeddingResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createfile.ts b/src/sdk/models/operations/createfile.ts index 0a5bf62..254af49 100755 --- a/src/sdk/models/operations/createfile.ts +++ b/src/sdk/models/operations/createfile.ts @@ -3,21 +3,22 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateFileResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - openAIFile?: any; + /** + * OK + */ + @SpeakeasyMetadata() + openAIFile?: shared.OpenAIFile; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createfinetune.ts b/src/sdk/models/operations/createfinetune.ts index 5931b95..757664e 100755 --- a/src/sdk/models/operations/createfinetune.ts +++ b/src/sdk/models/operations/createfinetune.ts @@ -3,21 +3,22 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared 
from "../shared"; import { AxiosResponse } from "axios"; export class CreateFineTuneResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - fineTune?: any; + /** + * OK + */ + @SpeakeasyMetadata() + fineTune?: shared.FineTune; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createimage.ts b/src/sdk/models/operations/createimage.ts index ebfa3e7..4e973b4 100755 --- a/src/sdk/models/operations/createimage.ts +++ b/src/sdk/models/operations/createimage.ts @@ -3,21 +3,22 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateImageResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - imagesResponse?: any; + /** + * OK + */ + @SpeakeasyMetadata() + imagesResponse?: shared.ImagesResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createimageedit.ts b/src/sdk/models/operations/createimageedit.ts index a034f22..e0ad72b 100755 --- a/src/sdk/models/operations/createimageedit.ts +++ b/src/sdk/models/operations/createimageedit.ts @@ -3,21 +3,22 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateImageEditResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - 
/** - * OK - */ - @SpeakeasyMetadata() - imagesResponse?: any; + /** + * OK + */ + @SpeakeasyMetadata() + imagesResponse?: shared.ImagesResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createimagevariation.ts b/src/sdk/models/operations/createimagevariation.ts index 9a95408..c16f193 100755 --- a/src/sdk/models/operations/createimagevariation.ts +++ b/src/sdk/models/operations/createimagevariation.ts @@ -3,21 +3,22 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateImageVariationResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - imagesResponse?: any; + /** + * OK + */ + @SpeakeasyMetadata() + imagesResponse?: shared.ImagesResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createmoderation.ts b/src/sdk/models/operations/createmoderation.ts index 7a025ac..e936350 100755 --- a/src/sdk/models/operations/createmoderation.ts +++ b/src/sdk/models/operations/createmoderation.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateModerationResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - createModerationResponse?: shared.CreateModerationResponse; + /** + * OK + */ + @SpeakeasyMetadata() + createModerationResponse?: shared.CreateModerationResponse; - 
@SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createsearch.ts b/src/sdk/models/operations/createsearch.ts index 6e5f054..8452f3e 100755 --- a/src/sdk/models/operations/createsearch.ts +++ b/src/sdk/models/operations/createsearch.ts @@ -7,31 +7,29 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateSearchRequest extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "request, media_type=application/json" }) - createSearchRequest: shared.CreateSearchRequest; + @SpeakeasyMetadata({ data: "request, media_type=application/json" }) + createSearchRequest: shared.CreateSearchRequest; - /** - * The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`. - */ - @SpeakeasyMetadata({ - data: "pathParam, style=simple;explode=false;name=engine_id", - }) - engineId: string; + /** + * The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`. 
+ */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=engine_id" }) + engineId: string; } export class CreateSearchResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - createSearchResponse?: shared.CreateSearchResponse; + /** + * OK + */ + @SpeakeasyMetadata() + createSearchResponse?: shared.CreateSearchResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createtranscription.ts b/src/sdk/models/operations/createtranscription.ts index 259829b..4bf7e45 100755 --- a/src/sdk/models/operations/createtranscription.ts +++ b/src/sdk/models/operations/createtranscription.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateTranscriptionResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - createTranscriptionResponse?: shared.CreateTranscriptionResponse; + /** + * OK + */ + @SpeakeasyMetadata() + createTranscriptionResponse?: shared.CreateTranscriptionResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createtranslation.ts b/src/sdk/models/operations/createtranslation.ts index f859c2a..fc8d2e6 100755 --- a/src/sdk/models/operations/createtranslation.ts +++ b/src/sdk/models/operations/createtranslation.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateTranslationResponse extends SpeakeasyBase { - 
@SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - createTranslationResponse?: shared.CreateTranslationResponse; + /** + * OK + */ + @SpeakeasyMetadata() + createTranslationResponse?: shared.CreateTranslationResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/deletefile.ts b/src/sdk/models/operations/deletefile.ts index e4b5e76..c0dd90b 100755 --- a/src/sdk/models/operations/deletefile.ts +++ b/src/sdk/models/operations/deletefile.ts @@ -7,28 +7,26 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class DeleteFileRequest extends SpeakeasyBase { - /** - * The ID of the file to use for this request - */ - @SpeakeasyMetadata({ - data: "pathParam, style=simple;explode=false;name=file_id", - }) - fileId: string; + /** + * The ID of the file to use for this request + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=file_id" }) + fileId: string; } export class DeleteFileResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - deleteFileResponse?: shared.DeleteFileResponse; + /** + * OK + */ + @SpeakeasyMetadata() + deleteFileResponse?: shared.DeleteFileResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/deletemodel.ts b/src/sdk/models/operations/deletemodel.ts index 6fe8225..85b6afa 100755 --- a/src/sdk/models/operations/deletemodel.ts +++ b/src/sdk/models/operations/deletemodel.ts @@ -7,28 +7,26 @@ import * as shared from 
"../shared"; import { AxiosResponse } from "axios"; export class DeleteModelRequest extends SpeakeasyBase { - /** - * The model to delete - */ - @SpeakeasyMetadata({ - data: "pathParam, style=simple;explode=false;name=model", - }) - model: string; + /** + * The model to delete + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=model" }) + model: string; } export class DeleteModelResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - deleteModelResponse?: shared.DeleteModelResponse; + /** + * OK + */ + @SpeakeasyMetadata() + deleteModelResponse?: shared.DeleteModelResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/downloadfile.ts b/src/sdk/models/operations/downloadfile.ts index 9f3e460..365ae12 100755 --- a/src/sdk/models/operations/downloadfile.ts +++ b/src/sdk/models/operations/downloadfile.ts @@ -6,28 +6,26 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { AxiosResponse } from "axios"; export class DownloadFileRequest extends SpeakeasyBase { - /** - * The ID of the file to use for this request - */ - @SpeakeasyMetadata({ - data: "pathParam, style=simple;explode=false;name=file_id", - }) - fileId: string; + /** + * The ID of the file to use for this request + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=file_id" }) + fileId: string; } export class DownloadFileResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: 
AxiosResponse; - /** - * OK - */ - @SpeakeasyMetadata() - downloadFile200ApplicationJSONString?: string; + /** + * OK + */ + @SpeakeasyMetadata() + downloadFile200ApplicationJSONString?: string; } diff --git a/src/sdk/models/operations/listengines.ts b/src/sdk/models/operations/listengines.ts index 46d56ae..6d1b335 100755 --- a/src/sdk/models/operations/listengines.ts +++ b/src/sdk/models/operations/listengines.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class ListEnginesResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - listEnginesResponse?: shared.ListEnginesResponse; + /** + * OK + */ + @SpeakeasyMetadata() + listEnginesResponse?: shared.ListEnginesResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/listfiles.ts b/src/sdk/models/operations/listfiles.ts index c631265..516974c 100755 --- a/src/sdk/models/operations/listfiles.ts +++ b/src/sdk/models/operations/listfiles.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class ListFilesResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - listFilesResponse?: shared.ListFilesResponse; + /** + * OK + */ + @SpeakeasyMetadata() + listFilesResponse?: shared.ListFilesResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/listfinetuneevents.ts b/src/sdk/models/operations/listfinetuneevents.ts index 
427b749..aab8a6d 100755 --- a/src/sdk/models/operations/listfinetuneevents.ts +++ b/src/sdk/models/operations/listfinetuneevents.ts @@ -7,49 +7,45 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class ListFineTuneEventsRequest extends SpeakeasyBase { - /** - * The ID of the fine-tune job to get events for. - * - * @remarks - * - */ - @SpeakeasyMetadata({ - data: "pathParam, style=simple;explode=false;name=fine_tune_id", - }) - fineTuneId: string; + /** + * The ID of the fine-tune job to get events for. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=fine_tune_id" }) + fineTuneId: string; - /** - * Whether to stream events for the fine-tune job. If set to true, - * - * @remarks - * events will be sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available. The stream will terminate with a - * `data: [DONE]` message when the job is finished (succeeded, cancelled, - * or failed). - * - * If set to false, only events generated so far will be returned. - * - */ - @SpeakeasyMetadata({ - data: "queryParam, style=form;explode=true;name=stream", - }) - stream?: boolean; + /** + * Whether to stream events for the fine-tune job. If set to true, + * + * @remarks + * events will be sent as data-only + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + * as they become available. The stream will terminate with a + * `data: [DONE]` message when the job is finished (succeeded, cancelled, + * or failed). + * + * If set to false, only events generated so far will be returned. 
+ * + */ + @SpeakeasyMetadata({ data: "queryParam, style=form;explode=true;name=stream" }) + stream?: boolean; } export class ListFineTuneEventsResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - listFineTuneEventsResponse?: shared.ListFineTuneEventsResponse; + /** + * OK + */ + @SpeakeasyMetadata() + listFineTuneEventsResponse?: shared.ListFineTuneEventsResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/listfinetunes.ts b/src/sdk/models/operations/listfinetunes.ts index c1ca7e3..c16ff58 100755 --- a/src/sdk/models/operations/listfinetunes.ts +++ b/src/sdk/models/operations/listfinetunes.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class ListFineTunesResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - listFineTunesResponse?: shared.ListFineTunesResponse; + /** + * OK + */ + @SpeakeasyMetadata() + listFineTunesResponse?: shared.ListFineTunesResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/listmodels.ts b/src/sdk/models/operations/listmodels.ts index fecafdc..2475554 100755 --- a/src/sdk/models/operations/listmodels.ts +++ b/src/sdk/models/operations/listmodels.ts @@ -7,18 +7,18 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class ListModelsResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + 
contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - listModelsResponse?: shared.ListModelsResponse; + /** + * OK + */ + @SpeakeasyMetadata() + listModelsResponse?: shared.ListModelsResponse; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/retrieveengine.ts b/src/sdk/models/operations/retrieveengine.ts index b1996a6..9606bd0 100755 --- a/src/sdk/models/operations/retrieveengine.ts +++ b/src/sdk/models/operations/retrieveengine.ts @@ -3,34 +3,33 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class RetrieveEngineRequest extends SpeakeasyBase { - /** - * The ID of the engine to use for this request - * - * @remarks - * - */ - @SpeakeasyMetadata({ - data: "pathParam, style=simple;explode=false;name=engine_id", - }) - engineId: string; + /** + * The ID of the engine to use for this request + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=engine_id" }) + engineId: string; } export class RetrieveEngineResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - engine?: any; + /** + * OK + */ + @SpeakeasyMetadata() + engine?: shared.Engine; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/retrievefile.ts b/src/sdk/models/operations/retrievefile.ts index ac285b7..0c443f9 100755 --- a/src/sdk/models/operations/retrievefile.ts +++ b/src/sdk/models/operations/retrievefile.ts @@ -3,31 +3,30 @@ */ import { SpeakeasyBase, 
SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class RetrieveFileRequest extends SpeakeasyBase { - /** - * The ID of the file to use for this request - */ - @SpeakeasyMetadata({ - data: "pathParam, style=simple;explode=false;name=file_id", - }) - fileId: string; + /** + * The ID of the file to use for this request + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=file_id" }) + fileId: string; } export class RetrieveFileResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - openAIFile?: any; + /** + * OK + */ + @SpeakeasyMetadata() + openAIFile?: shared.OpenAIFile; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/retrievefinetune.ts b/src/sdk/models/operations/retrievefinetune.ts index e7b514f..833b8fc 100755 --- a/src/sdk/models/operations/retrievefinetune.ts +++ b/src/sdk/models/operations/retrievefinetune.ts @@ -3,34 +3,33 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class RetrieveFineTuneRequest extends SpeakeasyBase { - /** - * The ID of the fine-tune job - * - * @remarks - * - */ - @SpeakeasyMetadata({ - data: "pathParam, style=simple;explode=false;name=fine_tune_id", - }) - fineTuneId: string; + /** + * The ID of the fine-tune job + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=fine_tune_id" }) + fineTuneId: string; } export class RetrieveFineTuneResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * 
OK - */ - @SpeakeasyMetadata() - fineTune?: any; + /** + * OK + */ + @SpeakeasyMetadata() + fineTune?: shared.FineTune; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/retrievemodel.ts b/src/sdk/models/operations/retrievemodel.ts index 987a64e..33c4cf9 100755 --- a/src/sdk/models/operations/retrievemodel.ts +++ b/src/sdk/models/operations/retrievemodel.ts @@ -3,31 +3,30 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class RetrieveModelRequest extends SpeakeasyBase { - /** - * The ID of the model to use for this request - */ - @SpeakeasyMetadata({ - data: "pathParam, style=simple;explode=false;name=model", - }) - model: string; + /** + * The ID of the model to use for this request + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=model" }) + model: string; } export class RetrieveModelResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; + @SpeakeasyMetadata() + contentType: string; - /** - * OK - */ - @SpeakeasyMetadata() - model?: any; + /** + * OK + */ + @SpeakeasyMetadata() + model?: shared.Model; - @SpeakeasyMetadata() - statusCode: number; + @SpeakeasyMetadata() + statusCode: number; - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/shared/chatcompletionrequestmessage.ts b/src/sdk/models/shared/chatcompletionrequestmessage.ts index da5b413..789ef5a 100755 --- a/src/sdk/models/shared/chatcompletionrequestmessage.ts +++ b/src/sdk/models/shared/chatcompletionrequestmessage.ts @@ -8,31 +8,31 @@ import { Expose } from "class-transformer"; /** * The role of the author of this message. 
*/ -export enum ChatCompletionRequestMessageRoleEnum { - System = "system", - User = "user", - Assistant = "assistant", +export enum ChatCompletionRequestMessageRole { + System = "system", + User = "user", + Assistant = "assistant", } export class ChatCompletionRequestMessage extends SpeakeasyBase { - /** - * The contents of the message - */ - @SpeakeasyMetadata() - @Expose({ name: "content" }) - content: string; + /** + * The contents of the message + */ + @SpeakeasyMetadata() + @Expose({ name: "content" }) + content: string; - /** - * The name of the user in a multi-user chat - */ - @SpeakeasyMetadata() - @Expose({ name: "name" }) - name?: string; + /** + * The name of the user in a multi-user chat + */ + @SpeakeasyMetadata() + @Expose({ name: "name" }) + name?: string; - /** - * The role of the author of this message. - */ - @SpeakeasyMetadata() - @Expose({ name: "role" }) - role: ChatCompletionRequestMessageRoleEnum; + /** + * The role of the author of this message. + */ + @SpeakeasyMetadata() + @Expose({ name: "role" }) + role: ChatCompletionRequestMessageRole; } diff --git a/src/sdk/models/shared/chatcompletionresponsemessage.ts b/src/sdk/models/shared/chatcompletionresponsemessage.ts index 60ddc96..1643401 100755 --- a/src/sdk/models/shared/chatcompletionresponsemessage.ts +++ b/src/sdk/models/shared/chatcompletionresponsemessage.ts @@ -8,24 +8,24 @@ import { Expose } from "class-transformer"; /** * The role of the author of this message. 
*/ -export enum ChatCompletionResponseMessageRoleEnum { - System = "system", - User = "user", - Assistant = "assistant", +export enum ChatCompletionResponseMessageRole { + System = "system", + User = "user", + Assistant = "assistant", } export class ChatCompletionResponseMessage extends SpeakeasyBase { - /** - * The contents of the message - */ - @SpeakeasyMetadata() - @Expose({ name: "content" }) - content: string; + /** + * The contents of the message + */ + @SpeakeasyMetadata() + @Expose({ name: "content" }) + content: string; - /** - * The role of the author of this message. - */ - @SpeakeasyMetadata() - @Expose({ name: "role" }) - role: ChatCompletionResponseMessageRoleEnum; + /** + * The role of the author of this message. + */ + @SpeakeasyMetadata() + @Expose({ name: "role" }) + role: ChatCompletionResponseMessageRole; } diff --git a/src/sdk/models/shared/createanswerrequest.ts b/src/sdk/models/shared/createanswerrequest.ts index 3337acb..b91faee 100755 --- a/src/sdk/models/shared/createanswerrequest.ts +++ b/src/sdk/models/shared/createanswerrequest.ts @@ -6,137 +6,137 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; export class CreateAnswerRequest extends SpeakeasyBase { - /** - * List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. - * - * @remarks - * - * You should specify either `documents` or a `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "documents" }) - documents?: string[]; - - /** - * List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. 
- */ - @SpeakeasyMetadata() - @Expose({ name: "examples" }) - examples: string[][]; - - /** - * A text snippet containing the contextual information used to generate the answers for the `examples` you provide. - */ - @SpeakeasyMetadata() - @Expose({ name: "examples_context" }) - examplesContext: string; - - /** - * If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. - */ - @SpeakeasyMetadata() - @Expose({ name: "expand" }) - expand?: any[]; - - /** - * The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. - * - * @remarks - * - * You should specify either `documents` or a `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "file" }) - file?: string; - - @SpeakeasyMetadata() - @Expose({ name: "logit_bias" }) - logitBias?: any; - - /** - * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - * - * @remarks - * - * The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. - * - * When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "logprobs" }) - logprobs?: number; - - /** - * The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. 
- */ - @SpeakeasyMetadata() - @Expose({ name: "max_rerank" }) - maxRerank?: number; - - /** - * The maximum number of tokens allowed for the generated answer - */ - @SpeakeasyMetadata() - @Expose({ name: "max_tokens" }) - maxTokens?: number; - - /** - * ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. - */ - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: string; - - /** - * How many answers to generate for each question. - */ - @SpeakeasyMetadata() - @Expose({ name: "n" }) - n?: number; - - /** - * Question to get answered. - */ - @SpeakeasyMetadata() - @Expose({ name: "question" }) - question: string; - - @SpeakeasyMetadata() - @Expose({ name: "return_metadata" }) - returnMetadata?: any; - - /** - * If set to `true`, the returned JSON will include a "prompt" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. - */ - @SpeakeasyMetadata() - @Expose({ name: "return_prompt" }) - returnPrompt?: boolean; - - /** - * ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. - */ - @SpeakeasyMetadata() - @Expose({ name: "search_model" }) - searchModel?: string; - - /** - * completions_stop_description - */ - @SpeakeasyMetadata() - @Expose({ name: "stop" }) - stop?: any; - - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - */ - @SpeakeasyMetadata() - @Expose({ name: "temperature" }) - temperature?: number; - - @SpeakeasyMetadata() - @Expose({ name: "user" }) - user?: any; + /** + * List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. 
+ * + * @remarks + * + * You should specify either `documents` or a `file`, but not both. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "documents" }) + documents?: string[]; + + /** + * List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. + */ + @SpeakeasyMetadata() + @Expose({ name: "examples" }) + examples: string[][]; + + /** + * A text snippet containing the contextual information used to generate the answers for the `examples` you provide. + */ + @SpeakeasyMetadata() + @Expose({ name: "examples_context" }) + examplesContext: string; + + /** + * If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. + */ + @SpeakeasyMetadata() + @Expose({ name: "expand" }) + expand?: any[]; + + /** + * The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. + * + * @remarks + * + * You should specify either `documents` or a `file`, but not both. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "file" }) + file?: string; + + @SpeakeasyMetadata() + @Expose({ name: "logit_bias" }) + logitBias?: any; + + /** + * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + * + * @remarks + * + * The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. + * + * When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs. 
+ * + */ + @SpeakeasyMetadata() + @Expose({ name: "logprobs" }) + logprobs?: number; + + /** + * The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. + */ + @SpeakeasyMetadata() + @Expose({ name: "max_rerank" }) + maxRerank?: number; + + /** + * The maximum number of tokens allowed for the generated answer + */ + @SpeakeasyMetadata() + @Expose({ name: "max_tokens" }) + maxTokens?: number; + + /** + * ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. + */ + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: string; + + /** + * How many answers to generate for each question. + */ + @SpeakeasyMetadata() + @Expose({ name: "n" }) + n?: number; + + /** + * Question to get answered. + */ + @SpeakeasyMetadata() + @Expose({ name: "question" }) + question: string; + + @SpeakeasyMetadata() + @Expose({ name: "return_metadata" }) + returnMetadata?: any; + + /** + * If set to `true`, the returned JSON will include a "prompt" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. + */ + @SpeakeasyMetadata() + @Expose({ name: "return_prompt" }) + returnPrompt?: boolean; + + /** + * ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. + */ + @SpeakeasyMetadata() + @Expose({ name: "search_model" }) + searchModel?: string; + + /** + * completions_stop_description + */ + @SpeakeasyMetadata() + @Expose({ name: "stop" }) + stop?: any; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
+ */ + @SpeakeasyMetadata() + @Expose({ name: "temperature" }) + temperature?: number; + + @SpeakeasyMetadata() + @Expose({ name: "user" }) + user?: any; } diff --git a/src/sdk/models/shared/createanswerresponse.ts b/src/sdk/models/shared/createanswerresponse.ts index e65ad81..cb1b306 100755 --- a/src/sdk/models/shared/createanswerresponse.ts +++ b/src/sdk/models/shared/createanswerresponse.ts @@ -6,41 +6,41 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; export class CreateAnswerResponseSelectedDocuments extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "document" }) - document?: number; + @SpeakeasyMetadata() + @Expose({ name: "document" }) + document?: number; - @SpeakeasyMetadata() - @Expose({ name: "text" }) - text?: string; + @SpeakeasyMetadata() + @Expose({ name: "text" }) + text?: string; } /** * OK */ export class CreateAnswerResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "answers" }) - answers?: string[]; - - @SpeakeasyMetadata() - @Expose({ name: "completion" }) - completion?: string; - - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model?: string; - - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object?: string; - - @SpeakeasyMetadata() - @Expose({ name: "search_model" }) - searchModel?: string; - - @SpeakeasyMetadata({ elemType: CreateAnswerResponseSelectedDocuments }) - @Expose({ name: "selected_documents" }) - @Type(() => CreateAnswerResponseSelectedDocuments) - selectedDocuments?: CreateAnswerResponseSelectedDocuments[]; + @SpeakeasyMetadata() + @Expose({ name: "answers" }) + answers?: string[]; + + @SpeakeasyMetadata() + @Expose({ name: "completion" }) + completion?: string; + + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model?: string; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object?: string; + + @SpeakeasyMetadata() + @Expose({ name: "search_model" }) + searchModel?: string; + + 
@SpeakeasyMetadata({ elemType: CreateAnswerResponseSelectedDocuments }) + @Expose({ name: "selected_documents" }) + @Type(() => CreateAnswerResponseSelectedDocuments) + selectedDocuments?: CreateAnswerResponseSelectedDocuments[]; } diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts index d02e17f..2bd3ee1 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -7,99 +7,99 @@ import { ChatCompletionRequestMessage } from "./chatcompletionrequestmessage"; import { Expose, Type } from "class-transformer"; export class CreateChatCompletionRequest extends SpeakeasyBase { - /** - * completions_frequency_penalty_description - */ - @SpeakeasyMetadata() - @Expose({ name: "frequency_penalty" }) - frequencyPenalty?: number; + /** + * completions_frequency_penalty_description + */ + @SpeakeasyMetadata() + @Expose({ name: "frequency_penalty" }) + frequencyPenalty?: number; - /** - * Modify the likelihood of specified tokens appearing in the completion. - * - * @remarks - * - * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "logit_bias" }) - logitBias?: Record; + /** + * Modify the likelihood of specified tokens appearing in the completion. + * + * @remarks + * + * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "logit_bias" }) + logitBias?: Record; - /** - * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). - * - * @remarks - * - */ - @SpeakeasyMetadata() - @Expose({ name: "max_tokens" }) - maxTokens?: number; + /** + * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). + * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "max_tokens" }) + maxTokens?: number; - /** - * The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction). - */ - @SpeakeasyMetadata({ elemType: ChatCompletionRequestMessage }) - @Expose({ name: "messages" }) - @Type(() => ChatCompletionRequestMessage) - messages: ChatCompletionRequestMessage[]; + /** + * The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction). + */ + @SpeakeasyMetadata({ elemType: ChatCompletionRequestMessage }) + @Expose({ name: "messages" }) + @Type(() => ChatCompletionRequestMessage) + messages: ChatCompletionRequestMessage[]; - /** - * ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported. - */ - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: string; + /** + * ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported. + */ + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: string; - /** - * How many chat completion choices to generate for each input message. 
- */ - @SpeakeasyMetadata() - @Expose({ name: "n" }) - n?: number; + /** + * How many chat completion choices to generate for each input message. + */ + @SpeakeasyMetadata() + @Expose({ name: "n" }) + n?: number; - /** - * completions_presence_penalty_description - */ - @SpeakeasyMetadata() - @Expose({ name: "presence_penalty" }) - presencePenalty?: number; + /** + * completions_presence_penalty_description + */ + @SpeakeasyMetadata() + @Expose({ name: "presence_penalty" }) + presencePenalty?: number; - /** - * Up to 4 sequences where the API will stop generating further tokens. - * - * @remarks - * - */ - @SpeakeasyMetadata() - @Expose({ name: "stop" }) - stop?: any; + /** + * Up to 4 sequences where the API will stop generating further tokens. + * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "stop" }) + stop?: any; - /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. - * - * @remarks - * - */ - @SpeakeasyMetadata() - @Expose({ name: "stream" }) - stream?: boolean; + /** + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. 
+ * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "stream" }) + stream?: boolean; - /** - * completions_temperature_description - */ - @SpeakeasyMetadata() - @Expose({ name: "temperature" }) - temperature?: number; + /** + * completions_temperature_description + */ + @SpeakeasyMetadata() + @Expose({ name: "temperature" }) + temperature?: number; - /** - * completions_top_p_description - */ - @SpeakeasyMetadata() - @Expose({ name: "top_p" }) - topP?: number; + /** + * completions_top_p_description + */ + @SpeakeasyMetadata() + @Expose({ name: "top_p" }) + topP?: number; - @SpeakeasyMetadata() - @Expose({ name: "user" }) - user?: any; + @SpeakeasyMetadata() + @Expose({ name: "user" }) + user?: any; } diff --git a/src/sdk/models/shared/createchatcompletionresponse.ts b/src/sdk/models/shared/createchatcompletionresponse.ts index 853a0dc..5e3fae0 100755 --- a/src/sdk/models/shared/createchatcompletionresponse.ts +++ b/src/sdk/models/shared/createchatcompletionresponse.ts @@ -7,61 +7,61 @@ import { ChatCompletionResponseMessage } from "./chatcompletionresponsemessage"; import { Expose, Type } from "class-transformer"; export class CreateChatCompletionResponseChoices extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "finish_reason" }) - finishReason?: string; + @SpeakeasyMetadata() + @Expose({ name: "finish_reason" }) + finishReason?: string; - @SpeakeasyMetadata() - @Expose({ name: "index" }) - index?: number; + @SpeakeasyMetadata() + @Expose({ name: "index" }) + index?: number; - @SpeakeasyMetadata() - @Expose({ name: "message" }) - @Type(() => ChatCompletionResponseMessage) - message?: ChatCompletionResponseMessage; + @SpeakeasyMetadata() + @Expose({ name: "message" }) + @Type(() => ChatCompletionResponseMessage) + message?: ChatCompletionResponseMessage; } export class CreateChatCompletionResponseUsage extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "completion_tokens" }) - completionTokens: number; + 
@SpeakeasyMetadata() + @Expose({ name: "completion_tokens" }) + completionTokens: number; - @SpeakeasyMetadata() - @Expose({ name: "prompt_tokens" }) - promptTokens: number; + @SpeakeasyMetadata() + @Expose({ name: "prompt_tokens" }) + promptTokens: number; - @SpeakeasyMetadata() - @Expose({ name: "total_tokens" }) - totalTokens: number; + @SpeakeasyMetadata() + @Expose({ name: "total_tokens" }) + totalTokens: number; } /** * OK */ export class CreateChatCompletionResponse extends SpeakeasyBase { - @SpeakeasyMetadata({ elemType: CreateChatCompletionResponseChoices }) - @Expose({ name: "choices" }) - @Type(() => CreateChatCompletionResponseChoices) - choices: CreateChatCompletionResponseChoices[]; + @SpeakeasyMetadata({ elemType: CreateChatCompletionResponseChoices }) + @Expose({ name: "choices" }) + @Type(() => CreateChatCompletionResponseChoices) + choices: CreateChatCompletionResponseChoices[]; - @SpeakeasyMetadata() - @Expose({ name: "created" }) - created: number; + @SpeakeasyMetadata() + @Expose({ name: "created" }) + created: number; - @SpeakeasyMetadata() - @Expose({ name: "id" }) - id: string; + @SpeakeasyMetadata() + @Expose({ name: "id" }) + id: string; - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: string; + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: string; - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; - @SpeakeasyMetadata() - @Expose({ name: "usage" }) - @Type(() => CreateChatCompletionResponseUsage) - usage?: CreateChatCompletionResponseUsage; + @SpeakeasyMetadata() + @Expose({ name: "usage" }) + @Type(() => CreateChatCompletionResponseUsage) + usage?: CreateChatCompletionResponseUsage; } diff --git a/src/sdk/models/shared/createclassificationrequest.ts b/src/sdk/models/shared/createclassificationrequest.ts index f28dc67..4e99161 100755 --- a/src/sdk/models/shared/createclassificationrequest.ts +++ 
b/src/sdk/models/shared/createclassificationrequest.ts @@ -6,91 +6,91 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; export class CreateClassificationRequest extends SpeakeasyBase { - /** - * A list of examples with labels, in the following format: - * - * @remarks - * - * `[["The movie is so interesting.", "Positive"], ["It is quite boring.", "Negative"], ...]` - * - * All the label strings will be normalized to be capitalized. - * - * You should specify either `examples` or `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "examples" }) - examples?: string[][]; + /** + * A list of examples with labels, in the following format: + * + * @remarks + * + * `[["The movie is so interesting.", "Positive"], ["It is quite boring.", "Negative"], ...]` + * + * All the label strings will be normalized to be capitalized. + * + * You should specify either `examples` or `file`, but not both. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "examples" }) + examples?: string[][]; - @SpeakeasyMetadata() - @Expose({ name: "expand" }) - expand?: any; + @SpeakeasyMetadata() + @Expose({ name: "expand" }) + expand?: any; - /** - * The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. - * - * @remarks - * - * You should specify either `examples` or `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "file" }) - file?: string; + /** + * The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. + * + * @remarks + * + * You should specify either `examples` or `file`, but not both. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "file" }) + file?: string; - /** - * The set of categories being classified. 
If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized. - */ - @SpeakeasyMetadata() - @Expose({ name: "labels" }) - labels?: string[]; + /** + * The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized. + */ + @SpeakeasyMetadata() + @Expose({ name: "labels" }) + labels?: string[]; - @SpeakeasyMetadata() - @Expose({ name: "logit_bias" }) - logitBias?: any; + @SpeakeasyMetadata() + @Expose({ name: "logit_bias" }) + logitBias?: any; - @SpeakeasyMetadata() - @Expose({ name: "logprobs" }) - logprobs?: any; + @SpeakeasyMetadata() + @Expose({ name: "logprobs" }) + logprobs?: any; - /** - * The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. - */ - @SpeakeasyMetadata() - @Expose({ name: "max_examples" }) - maxExamples?: number; + /** + * The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. + */ + @SpeakeasyMetadata() + @Expose({ name: "max_examples" }) + maxExamples?: number; - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: any; + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: any; - /** - * Query to be classified. - */ - @SpeakeasyMetadata() - @Expose({ name: "query" }) - query: string; + /** + * Query to be classified. 
+ */ + @SpeakeasyMetadata() + @Expose({ name: "query" }) + query: string; - @SpeakeasyMetadata() - @Expose({ name: "return_metadata" }) - returnMetadata?: any; + @SpeakeasyMetadata() + @Expose({ name: "return_metadata" }) + returnMetadata?: any; - @SpeakeasyMetadata() - @Expose({ name: "return_prompt" }) - returnPrompt?: any; + @SpeakeasyMetadata() + @Expose({ name: "return_prompt" }) + returnPrompt?: any; - @SpeakeasyMetadata() - @Expose({ name: "search_model" }) - searchModel?: any; + @SpeakeasyMetadata() + @Expose({ name: "search_model" }) + searchModel?: any; - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - */ - @SpeakeasyMetadata() - @Expose({ name: "temperature" }) - temperature?: number; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
+ */ + @SpeakeasyMetadata() + @Expose({ name: "temperature" }) + temperature?: number; - @SpeakeasyMetadata() - @Expose({ name: "user" }) - user?: any; + @SpeakeasyMetadata() + @Expose({ name: "user" }) + user?: any; } diff --git a/src/sdk/models/shared/createclassificationresponse.ts b/src/sdk/models/shared/createclassificationresponse.ts index a54116b..bc9ad9b 100755 --- a/src/sdk/models/shared/createclassificationresponse.ts +++ b/src/sdk/models/shared/createclassificationresponse.ts @@ -6,45 +6,45 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; export class CreateClassificationResponseSelectedExamples extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "document" }) - document?: number; + @SpeakeasyMetadata() + @Expose({ name: "document" }) + document?: number; - @SpeakeasyMetadata() - @Expose({ name: "label" }) - label?: string; + @SpeakeasyMetadata() + @Expose({ name: "label" }) + label?: string; - @SpeakeasyMetadata() - @Expose({ name: "text" }) - text?: string; + @SpeakeasyMetadata() + @Expose({ name: "text" }) + text?: string; } /** * OK */ export class CreateClassificationResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "completion" }) - completion?: string; - - @SpeakeasyMetadata() - @Expose({ name: "label" }) - label?: string; - - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model?: string; - - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object?: string; - - @SpeakeasyMetadata() - @Expose({ name: "search_model" }) - searchModel?: string; - - @SpeakeasyMetadata({ elemType: CreateClassificationResponseSelectedExamples }) - @Expose({ name: "selected_examples" }) - @Type(() => CreateClassificationResponseSelectedExamples) - selectedExamples?: CreateClassificationResponseSelectedExamples[]; + @SpeakeasyMetadata() + @Expose({ name: "completion" }) + completion?: string; + + @SpeakeasyMetadata() + @Expose({ name: "label" }) + 
label?: string; + + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model?: string; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object?: string; + + @SpeakeasyMetadata() + @Expose({ name: "search_model" }) + searchModel?: string; + + @SpeakeasyMetadata({ elemType: CreateClassificationResponseSelectedExamples }) + @Expose({ name: "selected_examples" }) + @Type(() => CreateClassificationResponseSelectedExamples) + selectedExamples?: CreateClassificationResponseSelectedExamples[]; } diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts index 0b50e5d..f13fd8a 100755 --- a/src/sdk/models/shared/createcompletionrequest.ts +++ b/src/sdk/models/shared/createcompletionrequest.ts @@ -6,181 +6,181 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; export class CreateCompletionRequest extends SpeakeasyBase { - /** - * Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. - * - * @remarks - * - * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. - * - * **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "best_of" }) - bestOf?: number; - - /** - * Echo back the prompt in addition to the completion - * - * @remarks - * - */ - @SpeakeasyMetadata() - @Expose({ name: "echo" }) - echo?: boolean; - - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 
- * - * @remarks - * - * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) - * - */ - @SpeakeasyMetadata() - @Expose({ name: "frequency_penalty" }) - frequencyPenalty?: number; - - /** - * Modify the likelihood of specified tokens appearing in the completion. - * - * @remarks - * - * Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - * - * As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "logit_bias" }) - logitBias?: Record; - - /** - * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - * - * @remarks - * - * The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "logprobs" }) - logprobs?: number; - - /** - * The maximum number of [tokens](/tokenizer) to generate in the completion. - * - * @remarks - * - * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096). 
- * - */ - @SpeakeasyMetadata() - @Expose({ name: "max_tokens" }) - maxTokens?: number; - - /** - * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - */ - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: string; - - /** - * How many completions to generate for each prompt. - * - * @remarks - * - * **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "n" }) - n?: number; - - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - * - * @remarks - * - * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) - * - */ - @SpeakeasyMetadata() - @Expose({ name: "presence_penalty" }) - presencePenalty?: number; - - /** - * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. - * - * @remarks - * - * Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "prompt" }) - prompt?: any; - - /** - * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. - * - * @remarks - * - */ - @SpeakeasyMetadata() - @Expose({ name: "stop" }) - stop?: any; - - /** - * Whether to stream back partial progress. 
If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. - * - * @remarks - * - */ - @SpeakeasyMetadata() - @Expose({ name: "stream" }) - stream?: boolean; - - /** - * The suffix that comes after a completion of inserted text. - */ - @SpeakeasyMetadata() - @Expose({ name: "suffix" }) - suffix?: string; - - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - * - * @remarks - * - * We generally recommend altering this or `top_p` but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "temperature" }) - temperature?: number; - - /** - * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. - * - * @remarks - * - * We generally recommend altering this or `temperature` but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "top_p" }) - topP?: number; - - /** - * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - * - * @remarks - * - */ - @SpeakeasyMetadata() - @Expose({ name: "user" }) - user?: string; + /** + * Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + * + * @remarks + * + * When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. 
+ * + * **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "best_of" }) + bestOf?: number; + + /** + * Echo back the prompt in addition to the completion + * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "echo" }) + echo?: boolean; + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + * + * @remarks + * + * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + * + */ + @SpeakeasyMetadata() + @Expose({ name: "frequency_penalty" }) + frequencyPenalty?: number; + + /** + * Modify the likelihood of specified tokens appearing in the completion. + * + * @remarks + * + * Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + * + * As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "logit_bias" }) + logitBias?: Record; + + /** + * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + * + * @remarks + * + * The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "logprobs" }) + logprobs?: number; + + /** + * The maximum number of [tokens](/tokenizer) to generate in the completion. + * + * @remarks + * + * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096). + * + */ + @SpeakeasyMetadata() + @Expose({ name: "max_tokens" }) + maxTokens?: number; + + /** + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + */ + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: string; + + /** + * How many completions to generate for each prompt. + * + * @remarks + * + * **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "n" }) + n?: number; + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + * + * @remarks + * + * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + * + */ + @SpeakeasyMetadata() + @Expose({ name: "presence_penalty" }) + presencePenalty?: number; + + /** + * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 
+ * + * @remarks + * + * Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "prompt" }) + prompt?: any; + + /** + * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "stop" }) + stop?: any; + + /** + * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. + * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "stream" }) + stream?: boolean; + + /** + * The suffix that comes after a completion of inserted text. + */ + @SpeakeasyMetadata() + @Expose({ name: "suffix" }) + suffix?: string; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + * + * @remarks + * + * We generally recommend altering this or `top_p` but not both. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "temperature" }) + temperature?: number; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * + * @remarks + * + * We generally recommend altering this or `temperature` but not both. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "top_p" }) + topP?: number; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](/docs/guides/safety-best-practices/end-user-ids). + * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "user" }) + user?: string; } diff --git a/src/sdk/models/shared/createcompletionresponse.ts b/src/sdk/models/shared/createcompletionresponse.ts index c596a91..cb99f53 100755 --- a/src/sdk/models/shared/createcompletionresponse.ts +++ b/src/sdk/models/shared/createcompletionresponse.ts @@ -6,83 +6,83 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; export class CreateCompletionResponseChoicesLogprobs extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "text_offset" }) - textOffset?: number[]; + @SpeakeasyMetadata() + @Expose({ name: "text_offset" }) + textOffset?: number[]; - @SpeakeasyMetadata() - @Expose({ name: "token_logprobs" }) - tokenLogprobs?: number[]; + @SpeakeasyMetadata() + @Expose({ name: "token_logprobs" }) + tokenLogprobs?: number[]; - @SpeakeasyMetadata() - @Expose({ name: "tokens" }) - tokens?: string[]; + @SpeakeasyMetadata() + @Expose({ name: "tokens" }) + tokens?: string[]; - @SpeakeasyMetadata() - @Expose({ name: "top_logprobs" }) - topLogprobs?: Record[]; + @SpeakeasyMetadata() + @Expose({ name: "top_logprobs" }) + topLogprobs?: Record[]; } export class CreateCompletionResponseChoices extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "finish_reason" }) - finishReason?: string; - - @SpeakeasyMetadata() - @Expose({ name: "index" }) - index?: number; - - @SpeakeasyMetadata() - @Expose({ name: "logprobs" }) - @Type(() => CreateCompletionResponseChoicesLogprobs) - logprobs?: CreateCompletionResponseChoicesLogprobs; - - @SpeakeasyMetadata() - @Expose({ name: "text" }) - text?: string; + @SpeakeasyMetadata() + @Expose({ name: "finish_reason" }) + finishReason?: string; + + @SpeakeasyMetadata() + @Expose({ name: "index" }) + index?: number; + + @SpeakeasyMetadata() + @Expose({ name: "logprobs" }) + @Type(() => 
CreateCompletionResponseChoicesLogprobs) + logprobs?: CreateCompletionResponseChoicesLogprobs; + + @SpeakeasyMetadata() + @Expose({ name: "text" }) + text?: string; } export class CreateCompletionResponseUsage extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "completion_tokens" }) - completionTokens: number; + @SpeakeasyMetadata() + @Expose({ name: "completion_tokens" }) + completionTokens: number; - @SpeakeasyMetadata() - @Expose({ name: "prompt_tokens" }) - promptTokens: number; + @SpeakeasyMetadata() + @Expose({ name: "prompt_tokens" }) + promptTokens: number; - @SpeakeasyMetadata() - @Expose({ name: "total_tokens" }) - totalTokens: number; + @SpeakeasyMetadata() + @Expose({ name: "total_tokens" }) + totalTokens: number; } /** * OK */ export class CreateCompletionResponse extends SpeakeasyBase { - @SpeakeasyMetadata({ elemType: CreateCompletionResponseChoices }) - @Expose({ name: "choices" }) - @Type(() => CreateCompletionResponseChoices) - choices: CreateCompletionResponseChoices[]; - - @SpeakeasyMetadata() - @Expose({ name: "created" }) - created: number; - - @SpeakeasyMetadata() - @Expose({ name: "id" }) - id: string; - - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: string; - - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; - - @SpeakeasyMetadata() - @Expose({ name: "usage" }) - @Type(() => CreateCompletionResponseUsage) - usage?: CreateCompletionResponseUsage; + @SpeakeasyMetadata({ elemType: CreateCompletionResponseChoices }) + @Expose({ name: "choices" }) + @Type(() => CreateCompletionResponseChoices) + choices: CreateCompletionResponseChoices[]; + + @SpeakeasyMetadata() + @Expose({ name: "created" }) + created: number; + + @SpeakeasyMetadata() + @Expose({ name: "id" }) + id: string; + + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: string; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; + + @SpeakeasyMetadata() + @Expose({ name: "usage" }) + @Type(() => 
CreateCompletionResponseUsage) + usage?: CreateCompletionResponseUsage; } diff --git a/src/sdk/models/shared/createeditrequest.ts b/src/sdk/models/shared/createeditrequest.ts index 1eab2c9..5ab2b38 100755 --- a/src/sdk/models/shared/createeditrequest.ts +++ b/src/sdk/models/shared/createeditrequest.ts @@ -6,45 +6,45 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; export class CreateEditRequest extends SpeakeasyBase { - /** - * The input text to use as a starting point for the edit. - */ - @SpeakeasyMetadata() - @Expose({ name: "input" }) - input?: string; + /** + * The input text to use as a starting point for the edit. + */ + @SpeakeasyMetadata() + @Expose({ name: "input" }) + input?: string; - /** - * The instruction that tells the model how to edit the prompt. - */ - @SpeakeasyMetadata() - @Expose({ name: "instruction" }) - instruction: string; + /** + * The instruction that tells the model how to edit the prompt. + */ + @SpeakeasyMetadata() + @Expose({ name: "instruction" }) + instruction: string; - /** - * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. - */ - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: string; + /** + * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. + */ + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: string; - /** - * How many edits to generate for the input and instruction. - */ - @SpeakeasyMetadata() - @Expose({ name: "n" }) - n?: number; + /** + * How many edits to generate for the input and instruction. 
+ */ + @SpeakeasyMetadata() + @Expose({ name: "n" }) + n?: number; - /** - * completions_temperature_description - */ - @SpeakeasyMetadata() - @Expose({ name: "temperature" }) - temperature?: number; + /** + * completions_temperature_description + */ + @SpeakeasyMetadata() + @Expose({ name: "temperature" }) + temperature?: number; - /** - * completions_top_p_description - */ - @SpeakeasyMetadata() - @Expose({ name: "top_p" }) - topP?: number; + /** + * completions_top_p_description + */ + @SpeakeasyMetadata() + @Expose({ name: "top_p" }) + topP?: number; } diff --git a/src/sdk/models/shared/createeditresponse.ts b/src/sdk/models/shared/createeditresponse.ts index 4d93bd8..b1820bb 100755 --- a/src/sdk/models/shared/createeditresponse.ts +++ b/src/sdk/models/shared/createeditresponse.ts @@ -6,75 +6,75 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; export class CreateEditResponseChoicesLogprobs extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "text_offset" }) - textOffset?: number[]; + @SpeakeasyMetadata() + @Expose({ name: "text_offset" }) + textOffset?: number[]; - @SpeakeasyMetadata() - @Expose({ name: "token_logprobs" }) - tokenLogprobs?: number[]; + @SpeakeasyMetadata() + @Expose({ name: "token_logprobs" }) + tokenLogprobs?: number[]; - @SpeakeasyMetadata() - @Expose({ name: "tokens" }) - tokens?: string[]; + @SpeakeasyMetadata() + @Expose({ name: "tokens" }) + tokens?: string[]; - @SpeakeasyMetadata() - @Expose({ name: "top_logprobs" }) - topLogprobs?: Record[]; + @SpeakeasyMetadata() + @Expose({ name: "top_logprobs" }) + topLogprobs?: Record[]; } export class CreateEditResponseChoices extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "finish_reason" }) - finishReason?: string; - - @SpeakeasyMetadata() - @Expose({ name: "index" }) - index?: number; - - @SpeakeasyMetadata() - @Expose({ name: "logprobs" }) - @Type(() => 
CreateEditResponseChoicesLogprobs) - logprobs?: CreateEditResponseChoicesLogprobs; - - @SpeakeasyMetadata() - @Expose({ name: "text" }) - text?: string; + @SpeakeasyMetadata() + @Expose({ name: "finish_reason" }) + finishReason?: string; + + @SpeakeasyMetadata() + @Expose({ name: "index" }) + index?: number; + + @SpeakeasyMetadata() + @Expose({ name: "logprobs" }) + @Type(() => CreateEditResponseChoicesLogprobs) + logprobs?: CreateEditResponseChoicesLogprobs; + + @SpeakeasyMetadata() + @Expose({ name: "text" }) + text?: string; } export class CreateEditResponseUsage extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "completion_tokens" }) - completionTokens: number; + @SpeakeasyMetadata() + @Expose({ name: "completion_tokens" }) + completionTokens: number; - @SpeakeasyMetadata() - @Expose({ name: "prompt_tokens" }) - promptTokens: number; + @SpeakeasyMetadata() + @Expose({ name: "prompt_tokens" }) + promptTokens: number; - @SpeakeasyMetadata() - @Expose({ name: "total_tokens" }) - totalTokens: number; + @SpeakeasyMetadata() + @Expose({ name: "total_tokens" }) + totalTokens: number; } /** * OK */ export class CreateEditResponse extends SpeakeasyBase { - @SpeakeasyMetadata({ elemType: CreateEditResponseChoices }) - @Expose({ name: "choices" }) - @Type(() => CreateEditResponseChoices) - choices: CreateEditResponseChoices[]; - - @SpeakeasyMetadata() - @Expose({ name: "created" }) - created: number; - - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; - - @SpeakeasyMetadata() - @Expose({ name: "usage" }) - @Type(() => CreateEditResponseUsage) - usage: CreateEditResponseUsage; + @SpeakeasyMetadata({ elemType: CreateEditResponseChoices }) + @Expose({ name: "choices" }) + @Type(() => CreateEditResponseChoices) + choices: CreateEditResponseChoices[]; + + @SpeakeasyMetadata() + @Expose({ name: "created" }) + created: number; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; + + @SpeakeasyMetadata() + @Expose({ name: 
"usage" }) + @Type(() => CreateEditResponseUsage) + usage: CreateEditResponseUsage; } diff --git a/src/sdk/models/shared/createembeddingrequest.ts b/src/sdk/models/shared/createembeddingrequest.ts index ba51bb6..f42371a 100755 --- a/src/sdk/models/shared/createembeddingrequest.ts +++ b/src/sdk/models/shared/createembeddingrequest.ts @@ -6,21 +6,21 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; export class CreateEmbeddingRequest extends SpeakeasyBase { - /** - * Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length. - * - * @remarks - * - */ - @SpeakeasyMetadata() - @Expose({ name: "input" }) - input: any; + /** + * Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length. 
+ * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "input" }) + input: any; - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: any; + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: any; - @SpeakeasyMetadata() - @Expose({ name: "user" }) - user?: any; + @SpeakeasyMetadata() + @Expose({ name: "user" }) + user?: any; } diff --git a/src/sdk/models/shared/createembeddingresponse.ts b/src/sdk/models/shared/createembeddingresponse.ts index b46fe48..1a468a5 100755 --- a/src/sdk/models/shared/createembeddingresponse.ts +++ b/src/sdk/models/shared/createembeddingresponse.ts @@ -6,48 +6,48 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; export class CreateEmbeddingResponseData extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "embedding" }) - embedding: number[]; + @SpeakeasyMetadata() + @Expose({ name: "embedding" }) + embedding: number[]; - @SpeakeasyMetadata() - @Expose({ name: "index" }) - index: number; + @SpeakeasyMetadata() + @Expose({ name: "index" }) + index: number; - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; } export class CreateEmbeddingResponseUsage extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "prompt_tokens" }) - promptTokens: number; + @SpeakeasyMetadata() + @Expose({ name: "prompt_tokens" }) + promptTokens: number; - @SpeakeasyMetadata() - @Expose({ name: "total_tokens" }) - totalTokens: number; + @SpeakeasyMetadata() + @Expose({ name: "total_tokens" }) + totalTokens: number; } /** * OK */ export class CreateEmbeddingResponse extends SpeakeasyBase { - @SpeakeasyMetadata({ elemType: CreateEmbeddingResponseData }) - @Expose({ name: "data" }) - @Type(() => CreateEmbeddingResponseData) - data: CreateEmbeddingResponseData[]; - - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: string; - - 
@SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; - - @SpeakeasyMetadata() - @Expose({ name: "usage" }) - @Type(() => CreateEmbeddingResponseUsage) - usage: CreateEmbeddingResponseUsage; + @SpeakeasyMetadata({ elemType: CreateEmbeddingResponseData }) + @Expose({ name: "data" }) + @Type(() => CreateEmbeddingResponseData) + data: CreateEmbeddingResponseData[]; + + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: string; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; + + @SpeakeasyMetadata() + @Expose({ name: "usage" }) + @Type(() => CreateEmbeddingResponseUsage) + usage: CreateEmbeddingResponseUsage; } diff --git a/src/sdk/models/shared/createfilerequest.ts b/src/sdk/models/shared/createfilerequest.ts index 957547b..724512c 100755 --- a/src/sdk/models/shared/createfilerequest.ts +++ b/src/sdk/models/shared/createfilerequest.ts @@ -5,33 +5,33 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; export class CreateFileRequestFile extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "multipart_form, content=true" }) - content: Uint8Array; + @SpeakeasyMetadata({ data: "multipart_form, content=true" }) + content: Uint8Array; - @SpeakeasyMetadata({ data: "multipart_form, name=file" }) - file: string; + @SpeakeasyMetadata({ data: "multipart_form, name=file" }) + file: string; } export class CreateFileRequest extends SpeakeasyBase { - /** - * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. - * - * @remarks - * - * If the `purpose` is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data). - * - */ - @SpeakeasyMetadata({ data: "multipart_form, file=true" }) - file: CreateFileRequestFile; + /** + * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. 
+ * + * @remarks + * + * If the `purpose` is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data). + * + */ + @SpeakeasyMetadata({ data: "multipart_form, file=true" }) + file: CreateFileRequestFile; - /** - * The intended purpose of the uploaded documents. - * - * @remarks - * - * Use "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file. - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=purpose" }) - purpose: string; + /** + * The intended purpose of the uploaded documents. + * + * @remarks + * + * Use "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file. + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=purpose" }) + purpose: string; } diff --git a/src/sdk/models/shared/createfinetunerequest.ts b/src/sdk/models/shared/createfinetunerequest.ts index 35a8cba..bc4f1b1 100755 --- a/src/sdk/models/shared/createfinetunerequest.ts +++ b/src/sdk/models/shared/createfinetunerequest.ts @@ -6,188 +6,188 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; export class CreateFineTuneRequest extends SpeakeasyBase { - /** - * The batch size to use for training. The batch size is the number of - * - * @remarks - * training examples used to train a single forward and backward pass. - * - * By default, the batch size will be dynamically configured to be - * ~0.2% of the number of examples in the training set, capped at 256 - - * in general, we've found that larger batch sizes tend to work better - * for larger datasets. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "batch_size" }) - batchSize?: number; + /** + * The batch size to use for training. 
The batch size is the number of + * + * @remarks + * training examples used to train a single forward and backward pass. + * + * By default, the batch size will be dynamically configured to be + * ~0.2% of the number of examples in the training set, capped at 256 - + * in general, we've found that larger batch sizes tend to work better + * for larger datasets. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "batch_size" }) + batchSize?: number; - /** - * If this is provided, we calculate F-beta scores at the specified - * - * @remarks - * beta values. The F-beta score is a generalization of F-1 score. - * This is only used for binary classification. - * - * With a beta of 1 (i.e. the F-1 score), precision and recall are - * given the same weight. A larger beta score puts more weight on - * recall and less on precision. A smaller beta score puts more weight - * on precision and less on recall. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "classification_betas" }) - classificationBetas?: number[]; + /** + * If this is provided, we calculate F-beta scores at the specified + * + * @remarks + * beta values. The F-beta score is a generalization of F-1 score. + * This is only used for binary classification. + * + * With a beta of 1 (i.e. the F-1 score), precision and recall are + * given the same weight. A larger beta score puts more weight on + * recall and less on precision. A smaller beta score puts more weight + * on precision and less on recall. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "classification_betas" }) + classificationBetas?: number[]; - /** - * The number of classes in a classification task. - * - * @remarks - * - * This parameter is required for multiclass classification. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "classification_n_classes" }) - classificationNClasses?: number; + /** + * The number of classes in a classification task. + * + * @remarks + * + * This parameter is required for multiclass classification. 
+ * + */ + @SpeakeasyMetadata() + @Expose({ name: "classification_n_classes" }) + classificationNClasses?: number; - /** - * The positive class in binary classification. - * - * @remarks - * - * This parameter is needed to generate precision, recall, and F1 - * metrics when doing binary classification. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "classification_positive_class" }) - classificationPositiveClass?: string; + /** + * The positive class in binary classification. + * + * @remarks + * + * This parameter is needed to generate precision, recall, and F1 + * metrics when doing binary classification. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "classification_positive_class" }) + classificationPositiveClass?: string; - /** - * If set, we calculate classification-specific metrics such as accuracy - * - * @remarks - * and F-1 score using the validation set at the end of every epoch. - * These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). - * - * In order to compute classification metrics, you must provide a - * `validation_file`. Additionally, you must - * specify `classification_n_classes` for multiclass classification or - * `classification_positive_class` for binary classification. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "compute_classification_metrics" }) - computeClassificationMetrics?: boolean; + /** + * If set, we calculate classification-specific metrics such as accuracy + * + * @remarks + * and F-1 score using the validation set at the end of every epoch. + * These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). + * + * In order to compute classification metrics, you must provide a + * `validation_file`. Additionally, you must + * specify `classification_n_classes` for multiclass classification or + * `classification_positive_class` for binary classification. 
+ * + */ + @SpeakeasyMetadata() + @Expose({ name: "compute_classification_metrics" }) + computeClassificationMetrics?: boolean; - /** - * The learning rate multiplier to use for training. - * - * @remarks - * The fine-tuning learning rate is the original learning rate used for - * pretraining multiplied by this value. - * - * By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 - * depending on final `batch_size` (larger learning rates tend to - * perform better with larger batch sizes). We recommend experimenting - * with values in the range 0.02 to 0.2 to see what produces the best - * results. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "learning_rate_multiplier" }) - learningRateMultiplier?: number; + /** + * The learning rate multiplier to use for training. + * + * @remarks + * The fine-tuning learning rate is the original learning rate used for + * pretraining multiplied by this value. + * + * By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 + * depending on final `batch_size` (larger learning rates tend to + * perform better with larger batch sizes). We recommend experimenting + * with values in the range 0.02 to 0.2 to see what produces the best + * results. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "learning_rate_multiplier" }) + learningRateMultiplier?: number; - /** - * The name of the base model to fine-tune. You can select one of "ada", - * - * @remarks - * "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. - * To learn more about these models, see the - * [Models](https://platform.openai.com/docs/models) documentation. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model?: string; + /** + * The name of the base model to fine-tune. You can select one of "ada", + * + * @remarks + * "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. 
+ * To learn more about these models, see the + * [Models](https://platform.openai.com/docs/models) documentation. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model?: string; - /** - * The number of epochs to train the model for. An epoch refers to one - * - * @remarks - * full cycle through the training dataset. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "n_epochs" }) - nEpochs?: number; + /** + * The number of epochs to train the model for. An epoch refers to one + * + * @remarks + * full cycle through the training dataset. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "n_epochs" }) + nEpochs?: number; - /** - * The weight to use for loss on the prompt tokens. This controls how - * - * @remarks - * much the model tries to learn to generate the prompt (as compared - * to the completion which always has a weight of 1.0), and can add - * a stabilizing effect to training when completions are short. - * - * If prompts are extremely long (relative to completions), it may make - * sense to reduce this weight so as to avoid over-prioritizing - * learning the prompt. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "prompt_loss_weight" }) - promptLossWeight?: number; + /** + * The weight to use for loss on the prompt tokens. This controls how + * + * @remarks + * much the model tries to learn to generate the prompt (as compared + * to the completion which always has a weight of 1.0), and can add + * a stabilizing effect to training when completions are short. + * + * If prompts are extremely long (relative to completions), it may make + * sense to reduce this weight so as to avoid over-prioritizing + * learning the prompt. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "prompt_loss_weight" }) + promptLossWeight?: number; - /** - * A string of up to 40 characters that will be added to your fine-tuned model name. 
- * - * @remarks - * - * For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "suffix" }) - suffix?: string; + /** + * A string of up to 40 characters that will be added to your fine-tuned model name. + * + * @remarks + * + * For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "suffix" }) + suffix?: string; - /** - * The ID of an uploaded file that contains training data. - * - * @remarks - * - * See [upload file](/docs/api-reference/files/upload) for how to upload a file. - * - * Your dataset must be formatted as a JSONL file, where each training - * example is a JSON object with the keys "prompt" and "completion". - * Additionally, you must upload your file with the purpose `fine-tune`. - * - * See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "training_file" }) - trainingFile: string; + /** + * The ID of an uploaded file that contains training data. + * + * @remarks + * + * See [upload file](/docs/api-reference/files/upload) for how to upload a file. + * + * Your dataset must be formatted as a JSONL file, where each training + * example is a JSON object with the keys "prompt" and "completion". + * Additionally, you must upload your file with the purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "training_file" }) + trainingFile: string; - /** - * The ID of an uploaded file that contains validation data. - * - * @remarks - * - * If you provide this file, the data is used to generate validation - * metrics periodically during fine-tuning. 
These metrics can be viewed in - * the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). - * Your train and validation data should be mutually exclusive. - * - * Your dataset must be formatted as a JSONL file, where each validation - * example is a JSON object with the keys "prompt" and "completion". - * Additionally, you must upload your file with the purpose `fine-tune`. - * - * See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "validation_file" }) - validationFile?: string; + /** + * The ID of an uploaded file that contains validation data. + * + * @remarks + * + * If you provide this file, the data is used to generate validation + * metrics periodically during fine-tuning. These metrics can be viewed in + * the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). + * Your train and validation data should be mutually exclusive. + * + * Your dataset must be formatted as a JSONL file, where each validation + * example is a JSON object with the keys "prompt" and "completion". + * Additionally, you must upload your file with the purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. 
+ * + */ + @SpeakeasyMetadata() + @Expose({ name: "validation_file" }) + validationFile?: string; } diff --git a/src/sdk/models/shared/createimageeditrequest.ts b/src/sdk/models/shared/createimageeditrequest.ts index ee3123b..d193c90 100755 --- a/src/sdk/models/shared/createimageeditrequest.ts +++ b/src/sdk/models/shared/createimageeditrequest.ts @@ -5,49 +5,49 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; export class CreateImageEditRequestImage extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "multipart_form, content=true" }) - content: Uint8Array; + @SpeakeasyMetadata({ data: "multipart_form, content=true" }) + content: Uint8Array; - @SpeakeasyMetadata({ data: "multipart_form, name=image" }) - image: string; + @SpeakeasyMetadata({ data: "multipart_form, name=image" }) + image: string; } export class CreateImageEditRequestMask extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "multipart_form, content=true" }) - content: Uint8Array; + @SpeakeasyMetadata({ data: "multipart_form, content=true" }) + content: Uint8Array; - @SpeakeasyMetadata({ data: "multipart_form, name=mask" }) - mask: string; + @SpeakeasyMetadata({ data: "multipart_form, name=mask" }) + mask: string; } export class CreateImageEditRequest extends SpeakeasyBase { - /** - * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. - */ - @SpeakeasyMetadata({ data: "multipart_form, file=true" }) - image: CreateImageEditRequestImage; - - /** - * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. - */ - @SpeakeasyMetadata({ data: "multipart_form, file=true" }) - mask?: CreateImageEditRequestMask; - - @SpeakeasyMetadata({ data: "multipart_form, name=n" }) - n?: any; - - /** - * A text description of the desired image(s). 
The maximum length is 1000 characters. - */ - @SpeakeasyMetadata({ data: "multipart_form, name=prompt" }) - prompt: string; - - @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) - responseFormat?: any; - - @SpeakeasyMetadata({ data: "multipart_form, name=size" }) - size?: any; - - @SpeakeasyMetadata({ data: "multipart_form, name=user" }) - user?: any; + /** + * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. + */ + @SpeakeasyMetadata({ data: "multipart_form, file=true" }) + image: CreateImageEditRequestImage; + + /** + * An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + */ + @SpeakeasyMetadata({ data: "multipart_form, file=true" }) + mask?: CreateImageEditRequestMask; + + @SpeakeasyMetadata({ data: "multipart_form, name=n" }) + n?: any; + + /** + * A text description of the desired image(s). The maximum length is 1000 characters. + */ + @SpeakeasyMetadata({ data: "multipart_form, name=prompt" }) + prompt: string; + + @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) + responseFormat?: any; + + @SpeakeasyMetadata({ data: "multipart_form, name=size" }) + size?: any; + + @SpeakeasyMetadata({ data: "multipart_form, name=user" }) + user?: any; } diff --git a/src/sdk/models/shared/createimagerequest.ts b/src/sdk/models/shared/createimagerequest.ts index 58074d0..e494a1a 100755 --- a/src/sdk/models/shared/createimagerequest.ts +++ b/src/sdk/models/shared/createimagerequest.ts @@ -8,50 +8,50 @@ import { Expose } from "class-transformer"; /** * The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
*/ -export enum CreateImageRequestResponseFormatEnum { - Url = "url", - B64Json = "b64_json", +export enum CreateImageRequestResponseFormat { + Url = "url", + B64Json = "b64_json", } /** * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. */ -export enum CreateImageRequestSizeEnum { - TwoHundredAndFiftySixx256 = "256x256", - FiveHundredAndTwelvex512 = "512x512", - OneThousandAndTwentyFourx1024 = "1024x1024", +export enum CreateImageRequestSize { + TwoHundredAndFiftySixx256 = "256x256", + FiveHundredAndTwelvex512 = "512x512", + OneThousandAndTwentyFourx1024 = "1024x1024", } export class CreateImageRequest extends SpeakeasyBase { - /** - * The number of images to generate. Must be between 1 and 10. - */ - @SpeakeasyMetadata() - @Expose({ name: "n" }) - n?: number; - - /** - * A text description of the desired image(s). The maximum length is 1000 characters. - */ - @SpeakeasyMetadata() - @Expose({ name: "prompt" }) - prompt: string; - - /** - * The format in which the generated images are returned. Must be one of `url` or `b64_json`. - */ - @SpeakeasyMetadata() - @Expose({ name: "response_format" }) - responseFormat?: CreateImageRequestResponseFormatEnum; - - /** - * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. - */ - @SpeakeasyMetadata() - @Expose({ name: "size" }) - size?: CreateImageRequestSizeEnum; - - @SpeakeasyMetadata() - @Expose({ name: "user" }) - user?: any; + /** + * The number of images to generate. Must be between 1 and 10. + */ + @SpeakeasyMetadata() + @Expose({ name: "n" }) + n?: number; + + /** + * A text description of the desired image(s). The maximum length is 1000 characters. + */ + @SpeakeasyMetadata() + @Expose({ name: "prompt" }) + prompt: string; + + /** + * The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
+ */ + @SpeakeasyMetadata() + @Expose({ name: "response_format" }) + responseFormat?: CreateImageRequestResponseFormat; + + /** + * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + */ + @SpeakeasyMetadata() + @Expose({ name: "size" }) + size?: CreateImageRequestSize; + + @SpeakeasyMetadata() + @Expose({ name: "user" }) + user?: any; } diff --git a/src/sdk/models/shared/createimagevariationrequest.ts b/src/sdk/models/shared/createimagevariationrequest.ts index af81dd7..f27c789 100755 --- a/src/sdk/models/shared/createimagevariationrequest.ts +++ b/src/sdk/models/shared/createimagevariationrequest.ts @@ -5,29 +5,29 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; export class CreateImageVariationRequestImage extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "multipart_form, content=true" }) - content: Uint8Array; + @SpeakeasyMetadata({ data: "multipart_form, content=true" }) + content: Uint8Array; - @SpeakeasyMetadata({ data: "multipart_form, name=image" }) - image: string; + @SpeakeasyMetadata({ data: "multipart_form, name=image" }) + image: string; } export class CreateImageVariationRequest extends SpeakeasyBase { - /** - * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. - */ - @SpeakeasyMetadata({ data: "multipart_form, file=true" }) - image: CreateImageVariationRequestImage; + /** + * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. 
+ */ + @SpeakeasyMetadata({ data: "multipart_form, file=true" }) + image: CreateImageVariationRequestImage; - @SpeakeasyMetadata({ data: "multipart_form, name=n" }) - n?: any; + @SpeakeasyMetadata({ data: "multipart_form, name=n" }) + n?: any; - @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) - responseFormat?: any; + @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) + responseFormat?: any; - @SpeakeasyMetadata({ data: "multipart_form, name=size" }) - size?: any; + @SpeakeasyMetadata({ data: "multipart_form, name=size" }) + size?: any; - @SpeakeasyMetadata({ data: "multipart_form, name=user" }) - user?: any; + @SpeakeasyMetadata({ data: "multipart_form, name=user" }) + user?: any; } diff --git a/src/sdk/models/shared/createmoderationrequest.ts b/src/sdk/models/shared/createmoderationrequest.ts index 5671bb5..91368ec 100755 --- a/src/sdk/models/shared/createmoderationrequest.ts +++ b/src/sdk/models/shared/createmoderationrequest.ts @@ -6,22 +6,22 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; export class CreateModerationRequest extends SpeakeasyBase { - /** - * The input text to classify - */ - @SpeakeasyMetadata() - @Expose({ name: "input" }) - input: any; + /** + * The input text to classify + */ + @SpeakeasyMetadata() + @Expose({ name: "input" }) + input: any; - /** - * Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. - * - * @remarks - * - * The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. 
- * - */ - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model?: string; + /** + * Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + * + * @remarks + * + * The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model?: string; } diff --git a/src/sdk/models/shared/createmoderationresponse.ts b/src/sdk/models/shared/createmoderationresponse.ts index a801070..d50b16b 100755 --- a/src/sdk/models/shared/createmoderationresponse.ts +++ b/src/sdk/models/shared/createmoderationresponse.ts @@ -6,95 +6,95 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; export class CreateModerationResponseResultsCategories extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "hate" }) - hate: boolean; + @SpeakeasyMetadata() + @Expose({ name: "hate" }) + hate: boolean; - @SpeakeasyMetadata() - @Expose({ name: "hate/threatening" }) - hateThreatening: boolean; + @SpeakeasyMetadata() + @Expose({ name: "hate/threatening" }) + hateThreatening: boolean; - @SpeakeasyMetadata() - @Expose({ name: "self-harm" }) - selfHarm: boolean; + @SpeakeasyMetadata() + @Expose({ name: "self-harm" }) + selfHarm: boolean; - @SpeakeasyMetadata() - @Expose({ name: "sexual" }) - sexual: boolean; + @SpeakeasyMetadata() + @Expose({ name: "sexual" }) + sexual: boolean; - @SpeakeasyMetadata() - @Expose({ name: "sexual/minors" }) - sexualMinors: boolean; + @SpeakeasyMetadata() + @Expose({ name: "sexual/minors" }) + sexualMinors: boolean; - @SpeakeasyMetadata() - @Expose({ name: "violence" }) - violence: boolean; + @SpeakeasyMetadata() + 
@Expose({ name: "violence" }) + violence: boolean; - @SpeakeasyMetadata() - @Expose({ name: "violence/graphic" }) - violenceGraphic: boolean; + @SpeakeasyMetadata() + @Expose({ name: "violence/graphic" }) + violenceGraphic: boolean; } export class CreateModerationResponseResultsCategoryScores extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "hate" }) - hate: number; + @SpeakeasyMetadata() + @Expose({ name: "hate" }) + hate: number; - @SpeakeasyMetadata() - @Expose({ name: "hate/threatening" }) - hateThreatening: number; + @SpeakeasyMetadata() + @Expose({ name: "hate/threatening" }) + hateThreatening: number; - @SpeakeasyMetadata() - @Expose({ name: "self-harm" }) - selfHarm: number; + @SpeakeasyMetadata() + @Expose({ name: "self-harm" }) + selfHarm: number; - @SpeakeasyMetadata() - @Expose({ name: "sexual" }) - sexual: number; + @SpeakeasyMetadata() + @Expose({ name: "sexual" }) + sexual: number; - @SpeakeasyMetadata() - @Expose({ name: "sexual/minors" }) - sexualMinors: number; + @SpeakeasyMetadata() + @Expose({ name: "sexual/minors" }) + sexualMinors: number; - @SpeakeasyMetadata() - @Expose({ name: "violence" }) - violence: number; + @SpeakeasyMetadata() + @Expose({ name: "violence" }) + violence: number; - @SpeakeasyMetadata() - @Expose({ name: "violence/graphic" }) - violenceGraphic: number; + @SpeakeasyMetadata() + @Expose({ name: "violence/graphic" }) + violenceGraphic: number; } export class CreateModerationResponseResults extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "categories" }) - @Type(() => CreateModerationResponseResultsCategories) - categories: CreateModerationResponseResultsCategories; - - @SpeakeasyMetadata() - @Expose({ name: "category_scores" }) - @Type(() => CreateModerationResponseResultsCategoryScores) - categoryScores: CreateModerationResponseResultsCategoryScores; - - @SpeakeasyMetadata() - @Expose({ name: "flagged" }) - flagged: boolean; + @SpeakeasyMetadata() + @Expose({ name: "categories" }) + @Type(() 
=> CreateModerationResponseResultsCategories) + categories: CreateModerationResponseResultsCategories; + + @SpeakeasyMetadata() + @Expose({ name: "category_scores" }) + @Type(() => CreateModerationResponseResultsCategoryScores) + categoryScores: CreateModerationResponseResultsCategoryScores; + + @SpeakeasyMetadata() + @Expose({ name: "flagged" }) + flagged: boolean; } /** * OK */ export class CreateModerationResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "id" }) - id: string; - - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: string; - - @SpeakeasyMetadata({ elemType: CreateModerationResponseResults }) - @Expose({ name: "results" }) - @Type(() => CreateModerationResponseResults) - results: CreateModerationResponseResults[]; + @SpeakeasyMetadata() + @Expose({ name: "id" }) + id: string; + + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: string; + + @SpeakeasyMetadata({ elemType: CreateModerationResponseResults }) + @Expose({ name: "results" }) + @Type(() => CreateModerationResponseResults) + results: CreateModerationResponseResults[]; } diff --git a/src/sdk/models/shared/createsearchrequest.ts b/src/sdk/models/shared/createsearchrequest.ts index 57cd2b9..08bec4f 100755 --- a/src/sdk/models/shared/createsearchrequest.ts +++ b/src/sdk/models/shared/createsearchrequest.ts @@ -6,64 +6,64 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; export class CreateSearchRequest extends SpeakeasyBase { - /** - * Up to 200 documents to search over, provided as a list of strings. - * - * @remarks - * - * The maximum document length (in tokens) is 2034 minus the number of tokens in the query. - * - * You should specify either `documents` or a `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "documents" }) - documents?: string[]; + /** + * Up to 200 documents to search over, provided as a list of strings. 
+ * + * @remarks + * + * The maximum document length (in tokens) is 2034 minus the number of tokens in the query. + * + * You should specify either `documents` or a `file`, but not both. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "documents" }) + documents?: string[]; - /** - * The ID of an uploaded file that contains documents to search over. - * - * @remarks - * - * You should specify either `documents` or a `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "file" }) - file?: string; + /** + * The ID of an uploaded file that contains documents to search over. + * + * @remarks + * + * You should specify either `documents` or a `file`, but not both. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "file" }) + file?: string; - /** - * The maximum number of documents to be re-ranked and returned by search. - * - * @remarks - * - * This flag only takes effect when `file` is set. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "max_rerank" }) - maxRerank?: number; + /** + * The maximum number of documents to be re-ranked and returned by search. + * + * @remarks + * + * This flag only takes effect when `file` is set. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "max_rerank" }) + maxRerank?: number; - /** - * Query to search against the documents. - */ - @SpeakeasyMetadata() - @Expose({ name: "query" }) - query: string; + /** + * Query to search against the documents. + */ + @SpeakeasyMetadata() + @Expose({ name: "query" }) + query: string; - /** - * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a "metadata" field. - * - * @remarks - * - * This flag only takes effect when `file` is set. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "return_metadata" }) - returnMetadata?: boolean; + /** + * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a "metadata" field. 
+ * + * @remarks + * + * This flag only takes effect when `file` is set. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "return_metadata" }) + returnMetadata?: boolean; - @SpeakeasyMetadata() - @Expose({ name: "user" }) - user?: any; + @SpeakeasyMetadata() + @Expose({ name: "user" }) + user?: any; } diff --git a/src/sdk/models/shared/createsearchresponse.ts b/src/sdk/models/shared/createsearchresponse.ts index 36bd2e4..14dd5fc 100755 --- a/src/sdk/models/shared/createsearchresponse.ts +++ b/src/sdk/models/shared/createsearchresponse.ts @@ -6,33 +6,33 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; export class CreateSearchResponseData extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "document" }) - document?: number; + @SpeakeasyMetadata() + @Expose({ name: "document" }) + document?: number; - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object?: string; + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object?: string; - @SpeakeasyMetadata() - @Expose({ name: "score" }) - score?: number; + @SpeakeasyMetadata() + @Expose({ name: "score" }) + score?: number; } /** * OK */ export class CreateSearchResponse extends SpeakeasyBase { - @SpeakeasyMetadata({ elemType: CreateSearchResponseData }) - @Expose({ name: "data" }) - @Type(() => CreateSearchResponseData) - data?: CreateSearchResponseData[]; + @SpeakeasyMetadata({ elemType: CreateSearchResponseData }) + @Expose({ name: "data" }) + @Type(() => CreateSearchResponseData) + data?: CreateSearchResponseData[]; - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model?: string; + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model?: string; - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object?: string; + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object?: string; } diff --git a/src/sdk/models/shared/createtranscriptionrequest.ts b/src/sdk/models/shared/createtranscriptionrequest.ts 
index 61ebbc8..feb1ac4 100755 --- a/src/sdk/models/shared/createtranscriptionrequest.ts +++ b/src/sdk/models/shared/createtranscriptionrequest.ts @@ -5,65 +5,65 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; export class CreateTranscriptionRequestFile extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "multipart_form, content=true" }) - content: Uint8Array; + @SpeakeasyMetadata({ data: "multipart_form, content=true" }) + content: Uint8Array; - @SpeakeasyMetadata({ data: "multipart_form, name=file" }) - file: string; + @SpeakeasyMetadata({ data: "multipart_form, name=file" }) + file: string; } export class CreateTranscriptionRequest extends SpeakeasyBase { - /** - * The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, file=true" }) - file: CreateTranscriptionRequestFile; + /** + * The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, file=true" }) + file: CreateTranscriptionRequestFile; - /** - * The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=language" }) - language?: string; + /** + * The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=language" }) + language?: string; - /** - * ID of the model to use. Only `whisper-1` is currently available. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=model" }) - model: string; + /** + * ID of the model to use. 
Only `whisper-1` is currently available. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=model" }) + model: string; - /** - * An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=prompt" }) - prompt?: string; + /** + * An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=prompt" }) + prompt?: string; - /** - * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) - responseFormat?: string; + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) + responseFormat?: string; - /** - * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=temperature" }) - temperature?: number; + /** + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=temperature" }) + temperature?: number; } diff --git a/src/sdk/models/shared/createtranscriptionresponse.ts b/src/sdk/models/shared/createtranscriptionresponse.ts index 8130ab3..12a63c9 100755 --- a/src/sdk/models/shared/createtranscriptionresponse.ts +++ b/src/sdk/models/shared/createtranscriptionresponse.ts @@ -9,7 +9,7 @@ import { Expose } from "class-transformer"; * OK */ export class CreateTranscriptionResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "text" }) - text: string; + @SpeakeasyMetadata() + @Expose({ name: "text" }) + text: string; } diff --git a/src/sdk/models/shared/createtranslationrequest.ts b/src/sdk/models/shared/createtranslationrequest.ts index 95f2a38..19c555c 100755 --- a/src/sdk/models/shared/createtranslationrequest.ts +++ b/src/sdk/models/shared/createtranslationrequest.ts @@ -5,56 +5,56 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; export class CreateTranslationRequestFile extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "multipart_form, content=true" }) - content: Uint8Array; + @SpeakeasyMetadata({ data: "multipart_form, content=true" }) + content: Uint8Array; - @SpeakeasyMetadata({ data: "multipart_form, name=file" }) - file: string; + @SpeakeasyMetadata({ data: "multipart_form, name=file" }) + file: string; } export class CreateTranslationRequest extends SpeakeasyBase { - /** - * The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, file=true" }) - file: CreateTranslationRequestFile; - - /** - * ID of the model to use. Only `whisper-1` is currently available. 
- * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=model" }) - model: string; - - /** - * An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=prompt" }) - prompt?: string; - - /** - * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) - responseFormat?: string; - - /** - * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=temperature" }) - temperature?: number; + /** + * The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, file=true" }) + file: CreateTranslationRequestFile; + + /** + * ID of the model to use. Only `whisper-1` is currently available. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=model" }) + model: string; + + /** + * An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=prompt" }) + prompt?: string; + + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. 
+ * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) + responseFormat?: string; + + /** + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=temperature" }) + temperature?: number; } diff --git a/src/sdk/models/shared/createtranslationresponse.ts b/src/sdk/models/shared/createtranslationresponse.ts index 92f337d..b299cbd 100755 --- a/src/sdk/models/shared/createtranslationresponse.ts +++ b/src/sdk/models/shared/createtranslationresponse.ts @@ -9,7 +9,7 @@ import { Expose } from "class-transformer"; * OK */ export class CreateTranslationResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "text" }) - text: string; + @SpeakeasyMetadata() + @Expose({ name: "text" }) + text: string; } diff --git a/src/sdk/models/shared/deletefileresponse.ts b/src/sdk/models/shared/deletefileresponse.ts index 31fd926..30e9647 100755 --- a/src/sdk/models/shared/deletefileresponse.ts +++ b/src/sdk/models/shared/deletefileresponse.ts @@ -9,15 +9,15 @@ import { Expose } from "class-transformer"; * OK */ export class DeleteFileResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "deleted" }) - deleted: boolean; + @SpeakeasyMetadata() + @Expose({ name: "deleted" }) + deleted: boolean; - @SpeakeasyMetadata() - @Expose({ name: "id" }) - id: string; + @SpeakeasyMetadata() + @Expose({ name: "id" }) + id: string; - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; } diff --git a/src/sdk/models/shared/deletemodelresponse.ts 
b/src/sdk/models/shared/deletemodelresponse.ts index 33fab10..11986a9 100755 --- a/src/sdk/models/shared/deletemodelresponse.ts +++ b/src/sdk/models/shared/deletemodelresponse.ts @@ -9,15 +9,15 @@ import { Expose } from "class-transformer"; * OK */ export class DeleteModelResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "deleted" }) - deleted: boolean; + @SpeakeasyMetadata() + @Expose({ name: "deleted" }) + deleted: boolean; - @SpeakeasyMetadata() - @Expose({ name: "id" }) - id: string; + @SpeakeasyMetadata() + @Expose({ name: "id" }) + id: string; - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; } diff --git a/src/sdk/models/shared/engine.ts b/src/sdk/models/shared/engine.ts new file mode 100755 index 0000000..7b329fd --- /dev/null +++ b/src/sdk/models/shared/engine.ts @@ -0,0 +1,27 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose } from "class-transformer"; + +/** + * OK + */ +export class Engine extends SpeakeasyBase { + @SpeakeasyMetadata() + @Expose({ name: "created" }) + created: number; + + @SpeakeasyMetadata() + @Expose({ name: "id" }) + id: string; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; + + @SpeakeasyMetadata() + @Expose({ name: "ready" }) + ready: boolean; +} diff --git a/src/sdk/models/shared/finetune.ts b/src/sdk/models/shared/finetune.ts new file mode 100755 index 0000000..bfeb9a5 --- /dev/null +++ b/src/sdk/models/shared/finetune.ts @@ -0,0 +1,69 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
+ */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { FineTuneEvent } from "./finetuneevent"; +import { OpenAIFile } from "./openaifile"; +import { Expose, Type } from "class-transformer"; + +/** + * OK + */ +export class FineTune extends SpeakeasyBase { + @SpeakeasyMetadata() + @Expose({ name: "created_at" }) + createdAt: number; + + @SpeakeasyMetadata({ elemType: FineTuneEvent }) + @Expose({ name: "events" }) + @Type(() => FineTuneEvent) + events?: FineTuneEvent[]; + + @SpeakeasyMetadata() + @Expose({ name: "fine_tuned_model" }) + fineTunedModel: string; + + @SpeakeasyMetadata() + @Expose({ name: "hyperparams" }) + hyperparams: Record; + + @SpeakeasyMetadata() + @Expose({ name: "id" }) + id: string; + + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: string; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; + + @SpeakeasyMetadata() + @Expose({ name: "organization_id" }) + organizationId: string; + + @SpeakeasyMetadata({ elemType: OpenAIFile }) + @Expose({ name: "result_files" }) + @Type(() => OpenAIFile) + resultFiles: OpenAIFile[]; + + @SpeakeasyMetadata() + @Expose({ name: "status" }) + status: string; + + @SpeakeasyMetadata({ elemType: OpenAIFile }) + @Expose({ name: "training_files" }) + @Type(() => OpenAIFile) + trainingFiles: OpenAIFile[]; + + @SpeakeasyMetadata() + @Expose({ name: "updated_at" }) + updatedAt: number; + + @SpeakeasyMetadata({ elemType: OpenAIFile }) + @Expose({ name: "validation_files" }) + @Type(() => OpenAIFile) + validationFiles: OpenAIFile[]; +} diff --git a/src/sdk/models/shared/finetuneevent.ts b/src/sdk/models/shared/finetuneevent.ts new file mode 100755 index 0000000..94b7efd --- /dev/null +++ b/src/sdk/models/shared/finetuneevent.ts @@ -0,0 +1,24 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
+ */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose } from "class-transformer"; + +export class FineTuneEvent extends SpeakeasyBase { + @SpeakeasyMetadata() + @Expose({ name: "created_at" }) + createdAt: number; + + @SpeakeasyMetadata() + @Expose({ name: "level" }) + level: string; + + @SpeakeasyMetadata() + @Expose({ name: "message" }) + message: string; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; +} diff --git a/src/sdk/models/shared/imagesresponse.ts b/src/sdk/models/shared/imagesresponse.ts new file mode 100755 index 0000000..de0eac2 --- /dev/null +++ b/src/sdk/models/shared/imagesresponse.ts @@ -0,0 +1,30 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose, Type } from "class-transformer"; + +export class ImagesResponseData extends SpeakeasyBase { + @SpeakeasyMetadata() + @Expose({ name: "b64_json" }) + b64Json?: string; + + @SpeakeasyMetadata() + @Expose({ name: "url" }) + url?: string; +} + +/** + * OK + */ +export class ImagesResponse extends SpeakeasyBase { + @SpeakeasyMetadata() + @Expose({ name: "created" }) + created: number; + + @SpeakeasyMetadata({ elemType: ImagesResponseData }) + @Expose({ name: "data" }) + @Type(() => ImagesResponseData) + data: ImagesResponseData[]; +} diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index 6f142b3..e70595e 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -31,8 +31,14 @@ export * from "./createtranslationrequest"; export * from "./createtranslationresponse"; export * from "./deletefileresponse"; export * from "./deletemodelresponse"; +export * from "./engine"; +export * from "./finetune"; +export * from "./finetuneevent"; +export * from "./imagesresponse"; export * from "./listenginesresponse"; export * from "./listfilesresponse"; export * from 
"./listfinetuneeventsresponse"; export * from "./listfinetunesresponse"; export * from "./listmodelsresponse"; +export * from "./model"; +export * from "./openaifile"; diff --git a/src/sdk/models/shared/listenginesresponse.ts b/src/sdk/models/shared/listenginesresponse.ts index b4384e2..ce86403 100755 --- a/src/sdk/models/shared/listenginesresponse.ts +++ b/src/sdk/models/shared/listenginesresponse.ts @@ -3,17 +3,19 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; +import { Engine } from "./engine"; +import { Expose, Type } from "class-transformer"; /** * OK */ export class ListEnginesResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "data" }) - data: any[]; + @SpeakeasyMetadata({ elemType: Engine }) + @Expose({ name: "data" }) + @Type(() => Engine) + data: Engine[]; - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; } diff --git a/src/sdk/models/shared/listfilesresponse.ts b/src/sdk/models/shared/listfilesresponse.ts index 842b0e3..cbbfddc 100755 --- a/src/sdk/models/shared/listfilesresponse.ts +++ b/src/sdk/models/shared/listfilesresponse.ts @@ -3,17 +3,19 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; +import { OpenAIFile } from "./openaifile"; +import { Expose, Type } from "class-transformer"; /** * OK */ export class ListFilesResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "data" }) - data: any[]; + @SpeakeasyMetadata({ elemType: OpenAIFile }) + @Expose({ name: "data" }) + @Type(() => OpenAIFile) + data: OpenAIFile[]; - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; } diff --git a/src/sdk/models/shared/listfinetuneeventsresponse.ts 
b/src/sdk/models/shared/listfinetuneeventsresponse.ts index e19c404..d670361 100755 --- a/src/sdk/models/shared/listfinetuneeventsresponse.ts +++ b/src/sdk/models/shared/listfinetuneeventsresponse.ts @@ -3,17 +3,19 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; +import { FineTuneEvent } from "./finetuneevent"; +import { Expose, Type } from "class-transformer"; /** * OK */ export class ListFineTuneEventsResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "data" }) - data: any[]; + @SpeakeasyMetadata({ elemType: FineTuneEvent }) + @Expose({ name: "data" }) + @Type(() => FineTuneEvent) + data: FineTuneEvent[]; - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; } diff --git a/src/sdk/models/shared/listfinetunesresponse.ts b/src/sdk/models/shared/listfinetunesresponse.ts index c381407..897f927 100755 --- a/src/sdk/models/shared/listfinetunesresponse.ts +++ b/src/sdk/models/shared/listfinetunesresponse.ts @@ -3,17 +3,19 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; +import { FineTune } from "./finetune"; +import { Expose, Type } from "class-transformer"; /** * OK */ export class ListFineTunesResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "data" }) - data: any[]; + @SpeakeasyMetadata({ elemType: FineTune }) + @Expose({ name: "data" }) + @Type(() => FineTune) + data: FineTune[]; - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; } diff --git a/src/sdk/models/shared/listmodelsresponse.ts b/src/sdk/models/shared/listmodelsresponse.ts index fc621ca..0fe5605 100755 --- a/src/sdk/models/shared/listmodelsresponse.ts +++ b/src/sdk/models/shared/listmodelsresponse.ts @@ -3,17 +3,19 @@ */ import { 
SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; +import { Model } from "./model"; +import { Expose, Type } from "class-transformer"; /** * OK */ export class ListModelsResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "data" }) - data: any[]; + @SpeakeasyMetadata({ elemType: Model }) + @Expose({ name: "data" }) + @Type(() => Model) + data: Model[]; - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; } diff --git a/src/sdk/models/shared/model.ts b/src/sdk/models/shared/model.ts new file mode 100755 index 0000000..9b30683 --- /dev/null +++ b/src/sdk/models/shared/model.ts @@ -0,0 +1,27 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose } from "class-transformer"; + +/** + * OK + */ +export class Model extends SpeakeasyBase { + @SpeakeasyMetadata() + @Expose({ name: "created" }) + created: number; + + @SpeakeasyMetadata() + @Expose({ name: "id" }) + id: string; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; + + @SpeakeasyMetadata() + @Expose({ name: "owned_by" }) + ownedBy: string; +} diff --git a/src/sdk/models/shared/openaifile.ts b/src/sdk/models/shared/openaifile.ts new file mode 100755 index 0000000..08b1a8c --- /dev/null +++ b/src/sdk/models/shared/openaifile.ts @@ -0,0 +1,43 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
+ */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose } from "class-transformer"; + +/** + * OK + */ +export class OpenAIFile extends SpeakeasyBase { + @SpeakeasyMetadata() + @Expose({ name: "bytes" }) + bytes: number; + + @SpeakeasyMetadata() + @Expose({ name: "created_at" }) + createdAt: number; + + @SpeakeasyMetadata() + @Expose({ name: "filename" }) + filename: string; + + @SpeakeasyMetadata() + @Expose({ name: "id" }) + id: string; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; + + @SpeakeasyMetadata() + @Expose({ name: "purpose" }) + purpose: string; + + @SpeakeasyMetadata() + @Expose({ name: "status" }) + status?: string; + + @SpeakeasyMetadata() + @Expose({ name: "status_details" }) + statusDetails?: Record; +} diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index 3c566ab..5a8a4ef 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -5,1676 +5,1823 @@ import * as utils from "../internal/utils"; import * as operations from "./models/operations"; import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; import { AxiosInstance, AxiosRequestConfig, AxiosResponse } from "axios"; /** * The OpenAI REST API */ export class OpenAI { - _defaultClient: AxiosInstance; - _securityClient: AxiosInstance; - _serverURL: string; - _language: string; - _sdkVersion: string; - _genVersion: string; - - constructor( - defaultClient: AxiosInstance, - securityClient: AxiosInstance, - serverURL: string, - language: string, - sdkVersion: string, - genVersion: string - ) { - this._defaultClient = defaultClient; - this._securityClient = securityClient; - this._serverURL = serverURL; - this._language = language; - this._sdkVersion = sdkVersion; - this._genVersion = genVersion; - } - - /** - * Immediately cancel a fine-tune job. 
- * - */ - cancelFineTune( - req: operations.CancelFineTuneRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.CancelFineTuneRequest(req); + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; } - const baseURL: string = this._serverURL; - const url: string = utils.generateURL( - baseURL, - "/fine-tunes/{fine_tune_id}/cancel", - req - ); - - const client: AxiosInstance = this._defaultClient; - - const r = client.request({ - url: url, - method: "post", - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CancelFineTuneResponse = - new operations.CancelFineTuneResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + /** + * Immediately cancel a fine-tune job. 
+ * + */ + async cancelFineTune( + req: operations.CancelFineTuneRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.CancelFineTuneRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}/cancel", req); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + ...config, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.fineTune = httpRes?.data; - } - break; - } - - return res; - }); - } - - /** - * Answers the specified question using the provided documents and examples. - * - * The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). 
- * - */ - createAnswer( - req: shared.CreateAnswerRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateAnswerRequest(req); - } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/answers"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "json" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateAnswerResponse = - new operations.CreateAnswerResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const res: operations.CancelFineTuneResponse = new operations.CancelFineTuneResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createAnswerResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.CreateAnswerResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Creates a completion for the chat message - */ - createChatCompletion( - req: shared.CreateChatCompletionRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateChatCompletionRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTune = utils.objectToClass(httpRes?.data, shared.FineTune); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/chat/completions"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "json" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } + /** + * Answers the specified question using the provided documents and examples. + * + * The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). 
+ * + * + * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible + */ + async createAnswer( + req: shared.CreateAnswerRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateAnswerRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/answers"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, + }); - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateChatCompletionResponse = - new operations.CreateChatCompletionResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateAnswerResponse = new operations.CreateAnswerResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createChatCompletionResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.CreateChatCompletionResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Classifies the specified `query` using provided examples. - * - * The endpoint first [searches](/docs/api-reference/searches) over the labeled examples - * to select the ones most relevant for the particular query. Then, the relevant examples - * are combined with the query to construct a prompt to produce the final label via the - * [completions](/docs/api-reference/completions) endpoint. - * - * Labeled examples can be provided via an uploaded `file`, or explicitly listed in the - * request using the `examples` parameter for quick tests and small scale use cases. 
- * - */ - createClassification( - req: shared.CreateClassificationRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateClassificationRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createAnswerResponse = utils.objectToClass( + httpRes?.data, + shared.CreateAnswerResponse + ); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/classifications"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "json" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } + /** + * Creates a completion for the chat message + */ + async createChatCompletion( + req: shared.CreateChatCompletionRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateChatCompletionRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/chat/completions"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = 
`speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateChatCompletionResponse = + new operations.CreateChatCompletionResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createChatCompletionResponse = utils.objectToClass( + httpRes?.data, + shared.CreateChatCompletionResponse + ); + } + break; + } + + return res; } - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateClassificationResponse = - new operations.CreateClassificationResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + /** + * Classifies the specified `query` using provided examples. + * + * The endpoint first [searches](/docs/api-reference/searches) over the labeled examples + * to select the ones most relevant for the particular query. 
Then, the relevant examples + * are combined with the query to construct a prompt to produce the final label via the + * [completions](/docs/api-reference/completions) endpoint. + * + * Labeled examples can be provided via an uploaded `file`, or explicitly listed in the + * request using the `examples` parameter for quick tests and small scale use cases. + * + * + * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible + */ + async createClassification( + req: shared.CreateClassificationRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateClassificationRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/classifications"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createClassificationResponse = 
utils.deserializeJSONResponse( - httpRes?.data, - shared.CreateClassificationResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Creates a completion for the provided prompt and parameters - */ - createCompletion( - req: shared.CreateCompletionRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateCompletionRequest(req); - } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/completions"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "json" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateClassificationResponse = + new operations.CreateClassificationResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createClassificationResponse = utils.objectToClass( + httpRes?.data, + shared.CreateClassificationResponse + ); + } + break; + } + + return res; } - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateCompletionResponse = - new operations.CreateCompletionResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + /** + * Creates a completion for the provided prompt and parameters + */ + async createCompletion( + req: shared.CreateCompletionRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateCompletionRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/completions"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createCompletionResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.CreateCompletionResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Creates a 
new edit for the provided input, instruction, and parameters. - */ - createEdit( - req: shared.CreateEditRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateEditRequest(req); - } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/edits"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "json" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateEditResponse = - new operations.CreateEditResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const res: operations.CreateCompletionResponse = new operations.CreateCompletionResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createEditResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.CreateEditResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Creates an embedding vector representing the input text. - */ - createEmbedding( - req: shared.CreateEmbeddingRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateEmbeddingRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createCompletionResponse = utils.objectToClass( + httpRes?.data, + shared.CreateCompletionResponse + ); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/embeddings"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "json" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } + /** + * Creates a new edit for the provided input, instruction, and parameters. 
+ */ + async createEdit( + req: shared.CreateEditRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateEditRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/edits"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, + }); - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateEmbeddingResponse = - new operations.CreateEmbeddingResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateEditResponse = new operations.CreateEditResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createEmbeddingResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.CreateEmbeddingResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. 
- * - */ - createFile( - req: shared.CreateFileRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateFileRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createEditResponse = utils.objectToClass( + httpRes?.data, + shared.CreateEditResponse + ); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/files"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "multipart" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } + /** + * Creates an embedding vector representing the input text. + */ + async createEmbedding( + req: shared.CreateEmbeddingRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateEmbeddingRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/embeddings"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} 
${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateFileResponse = - new operations.CreateFileResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const res: operations.CreateEmbeddingResponse = new operations.CreateEmbeddingResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.openAIFile = httpRes?.data; - } - break; - } - - return res; - }); - } - - /** - * Creates a job that fine-tunes a specified model from a given dataset. - * - * Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
- * - * [Learn more about Fine-tuning](/docs/guides/fine-tuning) - * - */ - createFineTune( - req: shared.CreateFineTuneRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateFineTuneRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createEmbeddingResponse = utils.objectToClass( + httpRes?.data, + shared.CreateEmbeddingResponse + ); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/fine-tunes"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "json" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } + /** + * Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. 
+ * + */ + async createFile( + req: shared.CreateFileRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateFileRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/files"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateFineTuneResponse = - new operations.CreateFineTuneResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const res: operations.CreateFileResponse = new operations.CreateFileResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.fineTune = httpRes?.data; - } - break; - } - - return res; - }); - } - - /** - * Creates an image given a prompt. - */ - createImage( - req: shared.CreateImageRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateImageRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.openAIFile = utils.objectToClass(httpRes?.data, shared.OpenAIFile); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/images/generations"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "json" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } + /** + * Creates a job that fine-tunes a specified model from a given dataset. + * + * Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
+ * + * [Learn more about Fine-tuning](/docs/guides/fine-tuning) + * + */ + async createFineTune( + req: shared.CreateFineTuneRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateFineTuneRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/fine-tunes"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateImageResponse = - new operations.CreateImageResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateFineTuneResponse = new operations.CreateFineTuneResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.imagesResponse = httpRes?.data; - } - break; - } - - return res; - }); - } - - /** - * Creates an edited or extended image given an original image and a prompt. - */ - createImageEdit( - req: shared.CreateImageEditRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateImageEditRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTune = utils.objectToClass(httpRes?.data, shared.FineTune); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/images/edits"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "multipart" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } + /** + * Creates an image given a prompt. 
+ */ + async createImage( + req: shared.CreateImageRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateImageRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/images/generations"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateImageEditResponse = - new operations.CreateImageEditResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateImageResponse = new operations.CreateImageResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.imagesResponse = httpRes?.data; - } - break; - } - - return res; - }); - } - - /** - * Creates a variation of a given image. - */ - createImageVariation( - req: shared.CreateImageVariationRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateImageVariationRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.imagesResponse = utils.objectToClass(httpRes?.data, shared.ImagesResponse); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/images/variations"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "multipart" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } + /** + * Creates an edited or extended image given an original image and a prompt. 
+ */ + async createImageEdit( + req: shared.CreateImageEditRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateImageEditRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/images/edits"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, + }); - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateImageVariationResponse = - new operations.CreateImageVariationResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateImageEditResponse = new operations.CreateImageEditResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.imagesResponse = httpRes?.data; - } - break; - } - - return res; - }); - } - - /** - * Classifies if text violates OpenAI's Content Policy - */ - createModeration( - req: shared.CreateModerationRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateModerationRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.imagesResponse = utils.objectToClass(httpRes?.data, shared.ImagesResponse); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/moderations"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "json" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } + /** + * Creates a variation of a given image. 
+ */ + async createImageVariation( + req: shared.CreateImageVariationRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateImageVariationRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/images/variations"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateImageVariationResponse = + new operations.CreateImageVariationResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.imagesResponse = utils.objectToClass(httpRes?.data, shared.ImagesResponse); + } + break; + } + + return res; } - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateModerationResponse = - new operations.CreateModerationResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + /** + * Classifies if text violates OpenAI's Content Policy + */ + async createModeration( + req: shared.CreateModerationRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateModerationRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/moderations"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, 
cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createModerationResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.CreateModerationResponse - ); - } - break; - } - - return res; - }); - } - - /** - * The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. - * - * To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. - * - * The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. 
- * - */ - createSearch( - req: operations.CreateSearchRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.CreateSearchRequest(req); - } - const baseURL: string = this._serverURL; - const url: string = utils.generateURL( - baseURL, - "/engines/{engine_id}/search", - req - ); - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "createSearchRequest", - "json" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateSearchResponse = - new operations.CreateSearchResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateModerationResponse = new operations.CreateModerationResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createSearchResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.CreateSearchResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Transcribes audio into the input language. - */ - createTranscription( - req: shared.CreateTranscriptionRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateTranscriptionRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createModerationResponse = utils.objectToClass( + httpRes?.data, + shared.CreateModerationResponse + ); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/audio/transcriptions"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "multipart" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } + /** + * The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. 
+ * + * To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. + * + * The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. + * + * + * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible + */ + async createSearch( + req: operations.CreateSearchRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.CreateSearchRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/engines/{engine_id}/search", req); + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody( + req, + "createSearchRequest", + "json" + ); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", 
+ headers: headers, + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateTranscriptionResponse = - new operations.CreateTranscriptionResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const res: operations.CreateSearchResponse = new operations.CreateSearchResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createTranscriptionResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.CreateTranscriptionResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Translates audio into into English. 
- */ - createTranslation( - req: shared.CreateTranslationRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateTranslationRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createSearchResponse = utils.objectToClass( + httpRes?.data, + shared.CreateSearchResponse + ); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/audio/translations"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "request", - "multipart" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } + /** + * Transcribes audio into the input language. + */ + async createTranscription( + req: shared.CreateTranscriptionRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateTranscriptionRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/audio/transcriptions"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = 
`speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateTranscriptionResponse = + new operations.CreateTranscriptionResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createTranscriptionResponse = utils.objectToClass( + httpRes?.data, + shared.CreateTranscriptionResponse + ); + } + break; + } + + return res; } - const client: AxiosInstance = this._defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - - const r = client.request({ - url: url, - method: "post", - headers: headers, - data: reqBody, - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.CreateTranslationResponse = - new operations.CreateTranslationResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + /** + * Translates audio into into English. 
+ */ + async createTranslation( + req: shared.CreateTranslationRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateTranslationRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/audio/translations"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + data: reqBody, + ...config, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createTranslationResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.CreateTranslationResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Delete a file. 
- */ - deleteFile( - req: operations.DeleteFileRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.DeleteFileRequest(req); - } - const baseURL: string = this._serverURL; - const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateTranslationResponse = new operations.CreateTranslationResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createTranslationResponse = utils.objectToClass( + httpRes?.data, + shared.CreateTranslationResponse + ); + } + break; + } + + return res; + } - const client: AxiosInstance = this._defaultClient; + /** + * Delete a file. 
+ */ + async deleteFile( + req: operations.DeleteFileRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.DeleteFileRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "delete", + headers: headers, + ...config, + }); - const r = client.request({ - url: url, - method: "delete", - ...config, - }); + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.DeleteFileResponse = - new operations.DeleteFileResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const res: operations.DeleteFileResponse = new operations.DeleteFileResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.deleteFileResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.DeleteFileResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Delete a fine-tuned model. You must have the Owner role in your organization. - */ - deleteModel( - req: operations.DeleteModelRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.DeleteModelRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.deleteFileResponse = utils.objectToClass( + httpRes?.data, + shared.DeleteFileResponse + ); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = utils.generateURL(baseURL, "/models/{model}", req); - - const client: AxiosInstance = this._defaultClient; + /** + * Delete a fine-tuned model. You must have the Owner role in your organization. 
+ */ + async deleteModel( + req: operations.DeleteModelRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.DeleteModelRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/models/{model}", req); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "delete", + headers: headers, + ...config, + }); - const r = client.request({ - url: url, - method: "delete", - ...config, - }); + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.DeleteModelResponse = - new operations.DeleteModelResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const res: operations.DeleteModelResponse = new operations.DeleteModelResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.deleteModelResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.DeleteModelResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Returns the contents of the specified file - */ - downloadFile( - req: operations.DownloadFileRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.DownloadFileRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.deleteModelResponse = utils.objectToClass( + httpRes?.data, + shared.DeleteModelResponse + ); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = utils.generateURL( - baseURL, - "/files/{file_id}/content", - req - ); - - const client: AxiosInstance = this._defaultClient; - - const r = client.request({ - url: url, - method: "get", - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.DownloadFileResponse = - new operations.DownloadFileResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + /** + * Returns the contents of the specified file + */ + async downloadFile( + req: operations.DownloadFileRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.DownloadFileRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/files/{file_id}/content", req); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + ...config, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.downloadFile200ApplicationJSONString = JSON.stringify( - httpRes?.data - ); - } - break; - } - - return res; - }); - } - - /** - * Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. 
- */ - listEngines( - config?: AxiosRequestConfig - ): Promise { - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/engines"; - - const client: AxiosInstance = this._defaultClient; - - const r = client.request({ - url: url, - method: "get", - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.ListEnginesResponse = - new operations.ListEnginesResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listEnginesResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.ListEnginesResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Returns a list of files that belong to the user's organization. - */ - listFiles( - config?: AxiosRequestConfig - ): Promise { - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/files"; - - const client: AxiosInstance = this._defaultClient; - - const r = client.request({ - url: url, - method: "get", - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.ListFilesResponse = - new operations.ListFilesResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.DownloadFileResponse = new operations.DownloadFileResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listFilesResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.ListFilesResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Get fine-grained status updates for a fine-tune job. - * - */ - listFineTuneEvents( - req: operations.ListFineTuneEventsRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.ListFineTuneEventsRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.downloadFile200ApplicationJSONString = JSON.stringify(httpRes?.data); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = utils.generateURL( - baseURL, - "/fine-tunes/{fine_tune_id}/events", - req - ); - - const client: AxiosInstance = this._defaultClient; - - const queryParams: string = utils.serializeQueryParams(req); - - const r = client.request({ - url: url + queryParams, - method: "get", - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.ListFineTuneEventsResponse = - new operations.ListFineTuneEventsResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + /** + * Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. 
+ * + * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible + */ + async listEngines(config?: AxiosRequestConfig): Promise { + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/engines"; + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + ...config, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listFineTuneEventsResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.ListFineTuneEventsResponse - ); - } - break; - } - - return res; - }); - } - - /** - * List your organization's fine-tuning jobs - * - */ - listFineTunes( - config?: AxiosRequestConfig - ): Promise { - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/fine-tunes"; - - const client: AxiosInstance = this._defaultClient; - - const r = client.request({ - url: url, - method: "get", - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.ListFineTunesResponse = - new operations.ListFineTunesResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListEnginesResponse = new operations.ListEnginesResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listFineTunesResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.ListFineTunesResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Lists the currently available models, and provides basic information about each one such as the owner and availability. - */ - listModels( - config?: AxiosRequestConfig - ): Promise { - const baseURL: string = this._serverURL; - const url: string = baseURL.replace(/\/$/, "") + "/models"; - - const client: AxiosInstance = this._defaultClient; - - const r = client.request({ - url: url, - method: "get", - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.ListModelsResponse = - new operations.ListModelsResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listEnginesResponse = utils.objectToClass( + httpRes?.data, + shared.ListEnginesResponse + ); + } + break; + } + + return res; + } + + /** + * Returns a list of files that belong to the user's organization. 
+ */ + async listFiles(config?: AxiosRequestConfig): Promise { + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/files"; + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + ...config, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listModelsResponse = utils.deserializeJSONResponse( - httpRes?.data, - shared.ListModelsResponse - ); - } - break; - } - - return res; - }); - } - - /** - * Retrieves a model instance, providing basic information about it such as the owner and availability. - */ - retrieveEngine( - req: operations.RetrieveEngineRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.RetrieveEngineRequest(req); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListFilesResponse = new operations.ListFilesResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listFilesResponse = utils.objectToClass( + httpRes?.data, + shared.ListFilesResponse + ); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = utils.generateURL(baseURL, "/engines/{engine_id}", req); + /** + * Get fine-grained status updates for a fine-tune job. + * + */ + async listFineTuneEvents( + req: operations.ListFineTuneEventsRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.ListFineTuneEventsRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}/events", req); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + const queryParams: string = utils.serializeQueryParams(req); + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url + queryParams, + method: "get", + headers: headers, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListFineTuneEventsResponse = + new operations.ListFineTuneEventsResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listFineTuneEventsResponse = utils.objectToClass( + httpRes?.data, + shared.ListFineTuneEventsResponse + ); + } + break; + } + + return res; + } - const client: AxiosInstance = this._defaultClient; + /** + * List your organization's fine-tuning jobs + * + */ + async listFineTunes(config?: AxiosRequestConfig): Promise { + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/fine-tunes"; + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + ...config, + }); - const r = client.request({ - url: url, - method: "get", - ...config, - }); + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.RetrieveEngineResponse = - new operations.RetrieveEngineResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const res: operations.ListFineTunesResponse = new operations.ListFineTunesResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.engine = httpRes?.data; - } - break; - } - - return res; - }); - } - - /** - * Returns information about a specific file. - */ - retrieveFile( - req: operations.RetrieveFileRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.RetrieveFileRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listFineTunesResponse = utils.objectToClass( + httpRes?.data, + shared.ListFineTunesResponse + ); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); + /** + * Lists the currently available models, and provides basic information about each one such as the owner and availability. 
+ */ + async listModels(config?: AxiosRequestConfig): Promise { + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/models"; + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListModelsResponse = new operations.ListModelsResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listModelsResponse = utils.objectToClass( + httpRes?.data, + shared.ListModelsResponse + ); + } + break; + } + + return res; + } - const client: AxiosInstance = this._defaultClient; + /** + * Retrieves a model instance, providing basic information about it such as the owner and availability. 
+ * + * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible + */ + async retrieveEngine( + req: operations.RetrieveEngineRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.RetrieveEngineRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/engines/{engine_id}", req); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + ...config, + }); - const r = client.request({ - url: url, - method: "get", - ...config, - }); + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.RetrieveFileResponse = - new operations.RetrieveFileResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const res: operations.RetrieveEngineResponse = new operations.RetrieveEngineResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.openAIFile = httpRes?.data; - } - break; - } - - return res; - }); - } - - /** - * Gets info about the fine-tune job. - * - * [Learn more about Fine-tuning](/docs/guides/fine-tuning) - * - */ - retrieveFineTune( - req: operations.RetrieveFineTuneRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.RetrieveFineTuneRequest(req); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.engine = utils.objectToClass(httpRes?.data, shared.Engine); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = utils.generateURL( - baseURL, - "/fine-tunes/{fine_tune_id}", - req - ); - - const client: AxiosInstance = this._defaultClient; - - const r = client.request({ - url: url, - method: "get", - ...config, - }); - - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.RetrieveFineTuneResponse = - new operations.RetrieveFineTuneResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + /** + * Returns information about a specific file. + */ + async retrieveFile( + req: operations.RetrieveFileRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.RetrieveFileRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + ...config, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.fineTune = httpRes?.data; - } - break; - } - - return res; - }); - } - - /** - * Retrieves a model instance, providing basic information about the model such as the owner and permissioning. - */ - retrieveModel( - req: operations.RetrieveModelRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.RetrieveModelRequest(req); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.RetrieveFileResponse = new operations.RetrieveFileResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.openAIFile = utils.objectToClass(httpRes?.data, shared.OpenAIFile); + } + break; + } + + return res; } - const baseURL: string = this._serverURL; - const url: string = utils.generateURL(baseURL, "/models/{model}", req); + /** + * Gets info about the fine-tune job. + * + * [Learn more about Fine-tuning](/docs/guides/fine-tuning) + * + */ + async retrieveFineTune( + req: operations.RetrieveFineTuneRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.RetrieveFineTuneRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}", req); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - const client: AxiosInstance = this._defaultClient; + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } - const r = client.request({ - url: url, - method: "get", - ...config, - }); + const res: operations.RetrieveFineTuneResponse = new operations.RetrieveFineTuneResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTune = utils.objectToClass(httpRes?.data, shared.FineTune); + } + break; + } + + return res; + } - return r.then((httpRes: AxiosResponse) => { - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + /** + * Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + */ + async retrieveModel( + req: operations.RetrieveModelRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.RetrieveModelRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/models/{model}", req); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + ...config, + }); - if (httpRes?.status == null) - throw new Error(`status code not found in response: ${httpRes}`); - const res: operations.RetrieveModelResponse = - new operations.RetrieveModelResponse({ - 
statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.RetrieveModelResponse = new operations.RetrieveModelResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, }); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.model = httpRes?.data; - } - break; - } - - return res; - }); - } + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.model = utils.objectToClass(httpRes?.data, shared.Model); + } + break; + } + + return res; + } } diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 05da8d3..9f9a5e6 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -3,7 +3,8 @@ */ import { OpenAI } from "./openai"; -import axios, { AxiosInstance } from "axios"; +import axios from "axios"; +import { AxiosInstance } from "axios"; /** * Contains the list of servers available to the SDK @@ -14,47 +15,65 @@ export const ServerList = ["https://api.openai.com/v1"] as const; * The available configuration options for the SDK */ export type SDKProps = { - /** - * Allows overriding the default axios client used by the SDK - */ - defaultClient?: AxiosInstance; - /** - * Allows overriding the default server URL used by the SDK - */ - serverURL?: string; + /** + * Allows overriding the default axios client used by the SDK + */ + defaultClient?: AxiosInstance; + + /** + * Allows overriding the default server used by the SDK + */ + serverIdx?: number; + + /** + * Allows overriding the default server URL used by the SDK + */ + serverURL?: string; }; +export class SDKConfiguration { + defaultClient: AxiosInstance; + securityClient: AxiosInstance; + serverURL: string; + serverDefaults: any; + language = 
"typescript"; + openapiDocVersion = "1.2.0"; + sdkVersion = "1.10.0"; + genVersion = "2.37.2"; + + public constructor(init?: Partial) { + Object.assign(this, init); + } +} + /** - * APIs for sampling from and fine-tuning language models + * OpenAI API: APIs for sampling from and fine-tuning language models */ export class Gpt { - /** - * The OpenAI REST API - */ - public openAI: OpenAI; - - public _defaultClient: AxiosInstance; - public _securityClient: AxiosInstance; - public _serverURL: string; - private _language = "typescript"; - private _sdkVersion = "1.9.2"; - private _genVersion = "2.16.5"; - private _globals: any; - - constructor(props?: SDKProps) { - this._serverURL = props?.serverURL ?? ServerList[0]; - - this._defaultClient = - props?.defaultClient ?? axios.create({ baseURL: this._serverURL }); - this._securityClient = this._defaultClient; - - this.openAI = new OpenAI( - this._defaultClient, - this._securityClient, - this._serverURL, - this._language, - this._sdkVersion, - this._genVersion - ); - } + /** + * The OpenAI REST API + */ + public openAI: OpenAI; + + private sdkConfiguration: SDKConfiguration; + + constructor(props?: SDKProps) { + let serverURL = props?.serverURL; + const serverIdx = props?.serverIdx ?? 0; + + if (!serverURL) { + serverURL = ServerList[serverIdx]; + } + + const defaultClient = props?.defaultClient ?? axios.create({ baseURL: serverURL }); + const securityClient = defaultClient; + + this.sdkConfiguration = new SDKConfiguration({ + defaultClient: defaultClient, + securityClient: securityClient, + serverURL: serverURL, + }); + + this.openAI = new OpenAI(this.sdkConfiguration); + } } diff --git a/src/sdk/types/index.ts b/src/sdk/types/index.ts new file mode 100755 index 0000000..de759a3 --- /dev/null +++ b/src/sdk/types/index.ts @@ -0,0 +1,5 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
+ */ + +export * from "./rfcdate"; diff --git a/src/sdk/types/rfcdate.ts b/src/sdk/types/rfcdate.ts new file mode 100755 index 0000000..1a3d24b --- /dev/null +++ b/src/sdk/types/rfcdate.ts @@ -0,0 +1,35 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +export class RFCDate { + private date: Date; + + constructor(date?: Date | string) { + if (typeof date === "string") { + this.date = new Date(date); + } else { + this.date = date ?? new Date(); + } + } + + public getDate(): Date { + return this.date; + } + + public toJSON(): string { + return this.toString(); + } + + public toString(): string { + const dateRegex = /^(\d{4})-(\d{2})-(\d{2})/; + + const matches = this.date.toISOString().match(dateRegex); + if (matches == null) { + throw new Error("Date format is not valid"); + } + + const [, year, month, day]: RegExpMatchArray = matches; + return `${year}-${month}-${day}`; + } +} From 6ea40d91b53b2ac22d2fa4c01f3ed9d7ddd32c20 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 10 Jun 2023 01:09:38 +0000 Subject: [PATCH 02/66] ci: regenerated with OpenAPI Doc 1.2.0, Speakeay CLI 1.47.0 --- README.md | 62 +- RELEASES.md | 10 +- .../operations/cancelfinetunerequest.md | 8 + .../operations/cancelfinetuneresponse.md | 11 + .../models/operations/createanswerresponse.md | 11 + .../createchatcompletionresponse.md | 11 + .../createclassificationresponse.md | 11 + .../operations/createcompletionresponse.md | 11 + docs/models/operations/createeditresponse.md | 11 + .../operations/createembeddingresponse.md | 11 + docs/models/operations/createfileresponse.md | 11 + .../operations/createfinetuneresponse.md | 11 + .../operations/createimageeditresponse.md | 11 + docs/models/operations/createimageresponse.md | 11 + .../createimagevariationresponse.md | 11 + .../operations/createmoderationresponse.md | 11 + docs/models/operations/createsearchrequest.md | 9 + .../models/operations/createsearchresponse.md | 11 + 
.../operations/createtranscriptionresponse.md | 11 + .../operations/createtranslationresponse.md | 11 + docs/models/operations/deletefilerequest.md | 8 + docs/models/operations/deletefileresponse.md | 11 + docs/models/operations/deletemodelrequest.md | 8 + docs/models/operations/deletemodelresponse.md | 11 + docs/models/operations/downloadfilerequest.md | 8 + .../models/operations/downloadfileresponse.md | 11 + docs/models/operations/listenginesresponse.md | 11 + docs/models/operations/listfilesresponse.md | 11 + .../operations/listfinetuneeventsrequest.md | 9 + .../operations/listfinetuneeventsresponse.md | 11 + .../operations/listfinetunesresponse.md | 11 + docs/models/operations/listmodelsresponse.md | 11 + .../operations/retrieveenginerequest.md | 8 + .../operations/retrieveengineresponse.md | 11 + docs/models/operations/retrievefilerequest.md | 8 + .../models/operations/retrievefileresponse.md | 11 + .../operations/retrievefinetunerequest.md | 8 + .../operations/retrievefinetuneresponse.md | 11 + .../models/operations/retrievemodelrequest.md | 8 + .../operations/retrievemodelresponse.md | 11 + .../shared/chatcompletionrequestmessage.md | 10 + .../chatcompletionrequestmessagerole.md | 12 + .../shared/chatcompletionresponsemessage.md | 9 + .../chatcompletionresponsemessagerole.md | 12 + docs/models/shared/createanswerrequest.md | 25 + docs/models/shared/createanswerresponse.md | 15 + .../createanswerresponseselecteddocuments.md | 9 + .../shared/createchatcompletionrequest.md | 19 + .../createchatcompletionrequestlogitbias.md | 12 + .../shared/createchatcompletionresponse.md | 15 + .../createchatcompletionresponsechoices.md | 10 + .../createchatcompletionresponseusage.md | 10 + .../shared/createclassificationrequest.md | 21 + .../shared/createclassificationresponse.md | 15 + ...eclassificationresponseselectedexamples.md | 10 + docs/models/shared/createcompletionrequest.md | 23 + .../createcompletionrequestlogitbias.md | 14 + 
.../models/shared/createcompletionresponse.md | 15 + .../shared/createcompletionresponsechoices.md | 11 + ...createcompletionresponsechoiceslogprobs.md | 11 + ...etionresponsechoiceslogprobstoplogprobs.md | 7 + .../shared/createcompletionresponseusage.md | 10 + docs/models/shared/createeditrequest.md | 13 + docs/models/shared/createeditresponse.md | 13 + .../shared/createeditresponsechoices.md | 11 + .../createeditresponsechoiceslogprobs.md | 11 + ...eeditresponsechoiceslogprobstoplogprobs.md | 7 + docs/models/shared/createeditresponseusage.md | 10 + docs/models/shared/createembeddingrequest.md | 10 + docs/models/shared/createembeddingresponse.md | 13 + .../shared/createembeddingresponsedata.md | 10 + .../shared/createembeddingresponseusage.md | 9 + docs/models/shared/createfilerequest.md | 9 + docs/models/shared/createfilerequestfile.md | 9 + docs/models/shared/createfinetunerequest.md | 19 + docs/models/shared/createimageeditrequest.md | 14 + .../shared/createimageeditrequestimage.md | 9 + .../shared/createimageeditrequestmask.md | 9 + docs/models/shared/createimagerequest.md | 12 + .../createimagerequestresponseformat.md | 11 + docs/models/shared/createimagerequestsize.md | 12 + .../shared/createimagevariationrequest.md | 12 + .../createimagevariationrequestimage.md | 9 + docs/models/shared/createmoderationrequest.md | 9 + .../models/shared/createmoderationresponse.md | 12 + .../shared/createmoderationresponseresults.md | 10 + ...eatemoderationresponseresultscategories.md | 14 + ...moderationresponseresultscategoryscores.md | 14 + docs/models/shared/createsearchrequest.md | 13 + docs/models/shared/createsearchresponse.md | 12 + .../models/shared/createsearchresponsedata.md | 10 + .../shared/createtranscriptionrequest.md | 13 + .../shared/createtranscriptionrequestfile.md | 9 + .../shared/createtranscriptionresponse.md | 10 + .../models/shared/createtranslationrequest.md | 12 + .../shared/createtranslationrequestfile.md | 9 + 
.../shared/createtranslationresponse.md | 10 + docs/models/shared/deletefileresponse.md | 12 + docs/models/shared/deletemodelresponse.md | 12 + docs/models/shared/engine.md | 13 + docs/models/shared/finetune.md | 22 + docs/models/shared/finetuneevent.md | 11 + docs/models/shared/finetunehyperparams.md | 7 + docs/models/shared/imagesresponse.md | 11 + docs/models/shared/imagesresponsedata.md | 9 + docs/models/shared/listenginesresponse.md | 11 + docs/models/shared/listfilesresponse.md | 11 + .../shared/listfinetuneeventsresponse.md | 11 + docs/models/shared/listfinetunesresponse.md | 11 + docs/models/shared/listmodelsresponse.md | 11 + docs/models/shared/model.md | 13 + docs/models/shared/openaifile.md | 17 + docs/models/shared/openaifilestatusdetails.md | 7 + docs/openai/README.md | 910 ------------ docs/{ => sdks}/gpt/README.md | 0 docs/sdks/openai/README.md | 1238 +++++++++++++++++ files.gen | 117 +- gen.yaml | 6 +- package-lock.json | 4 +- package.json | 2 +- src/internal/utils/utils.ts | 10 +- .../shared/createchatcompletionrequest.ts | 13 +- .../models/shared/createcompletionrequest.ts | 17 +- .../models/shared/createcompletionresponse.ts | 7 +- src/sdk/models/shared/createeditresponse.ts | 7 +- src/sdk/models/shared/finetune.ts | 5 +- src/sdk/models/shared/openaifile.ts | 7 +- src/sdk/sdk.ts | 4 +- 128 files changed, 2715 insertions(+), 967 deletions(-) create mode 100755 docs/models/operations/cancelfinetunerequest.md create mode 100755 docs/models/operations/cancelfinetuneresponse.md create mode 100755 docs/models/operations/createanswerresponse.md create mode 100755 docs/models/operations/createchatcompletionresponse.md create mode 100755 docs/models/operations/createclassificationresponse.md create mode 100755 docs/models/operations/createcompletionresponse.md create mode 100755 docs/models/operations/createeditresponse.md create mode 100755 docs/models/operations/createembeddingresponse.md create mode 100755 docs/models/operations/createfileresponse.md 
create mode 100755 docs/models/operations/createfinetuneresponse.md create mode 100755 docs/models/operations/createimageeditresponse.md create mode 100755 docs/models/operations/createimageresponse.md create mode 100755 docs/models/operations/createimagevariationresponse.md create mode 100755 docs/models/operations/createmoderationresponse.md create mode 100755 docs/models/operations/createsearchrequest.md create mode 100755 docs/models/operations/createsearchresponse.md create mode 100755 docs/models/operations/createtranscriptionresponse.md create mode 100755 docs/models/operations/createtranslationresponse.md create mode 100755 docs/models/operations/deletefilerequest.md create mode 100755 docs/models/operations/deletefileresponse.md create mode 100755 docs/models/operations/deletemodelrequest.md create mode 100755 docs/models/operations/deletemodelresponse.md create mode 100755 docs/models/operations/downloadfilerequest.md create mode 100755 docs/models/operations/downloadfileresponse.md create mode 100755 docs/models/operations/listenginesresponse.md create mode 100755 docs/models/operations/listfilesresponse.md create mode 100755 docs/models/operations/listfinetuneeventsrequest.md create mode 100755 docs/models/operations/listfinetuneeventsresponse.md create mode 100755 docs/models/operations/listfinetunesresponse.md create mode 100755 docs/models/operations/listmodelsresponse.md create mode 100755 docs/models/operations/retrieveenginerequest.md create mode 100755 docs/models/operations/retrieveengineresponse.md create mode 100755 docs/models/operations/retrievefilerequest.md create mode 100755 docs/models/operations/retrievefileresponse.md create mode 100755 docs/models/operations/retrievefinetunerequest.md create mode 100755 docs/models/operations/retrievefinetuneresponse.md create mode 100755 docs/models/operations/retrievemodelrequest.md create mode 100755 docs/models/operations/retrievemodelresponse.md create mode 100755 
docs/models/shared/chatcompletionrequestmessage.md create mode 100755 docs/models/shared/chatcompletionrequestmessagerole.md create mode 100755 docs/models/shared/chatcompletionresponsemessage.md create mode 100755 docs/models/shared/chatcompletionresponsemessagerole.md create mode 100755 docs/models/shared/createanswerrequest.md create mode 100755 docs/models/shared/createanswerresponse.md create mode 100755 docs/models/shared/createanswerresponseselecteddocuments.md create mode 100755 docs/models/shared/createchatcompletionrequest.md create mode 100755 docs/models/shared/createchatcompletionrequestlogitbias.md create mode 100755 docs/models/shared/createchatcompletionresponse.md create mode 100755 docs/models/shared/createchatcompletionresponsechoices.md create mode 100755 docs/models/shared/createchatcompletionresponseusage.md create mode 100755 docs/models/shared/createclassificationrequest.md create mode 100755 docs/models/shared/createclassificationresponse.md create mode 100755 docs/models/shared/createclassificationresponseselectedexamples.md create mode 100755 docs/models/shared/createcompletionrequest.md create mode 100755 docs/models/shared/createcompletionrequestlogitbias.md create mode 100755 docs/models/shared/createcompletionresponse.md create mode 100755 docs/models/shared/createcompletionresponsechoices.md create mode 100755 docs/models/shared/createcompletionresponsechoiceslogprobs.md create mode 100755 docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md create mode 100755 docs/models/shared/createcompletionresponseusage.md create mode 100755 docs/models/shared/createeditrequest.md create mode 100755 docs/models/shared/createeditresponse.md create mode 100755 docs/models/shared/createeditresponsechoices.md create mode 100755 docs/models/shared/createeditresponsechoiceslogprobs.md create mode 100755 docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md create mode 100755 
docs/models/shared/createeditresponseusage.md create mode 100755 docs/models/shared/createembeddingrequest.md create mode 100755 docs/models/shared/createembeddingresponse.md create mode 100755 docs/models/shared/createembeddingresponsedata.md create mode 100755 docs/models/shared/createembeddingresponseusage.md create mode 100755 docs/models/shared/createfilerequest.md create mode 100755 docs/models/shared/createfilerequestfile.md create mode 100755 docs/models/shared/createfinetunerequest.md create mode 100755 docs/models/shared/createimageeditrequest.md create mode 100755 docs/models/shared/createimageeditrequestimage.md create mode 100755 docs/models/shared/createimageeditrequestmask.md create mode 100755 docs/models/shared/createimagerequest.md create mode 100755 docs/models/shared/createimagerequestresponseformat.md create mode 100755 docs/models/shared/createimagerequestsize.md create mode 100755 docs/models/shared/createimagevariationrequest.md create mode 100755 docs/models/shared/createimagevariationrequestimage.md create mode 100755 docs/models/shared/createmoderationrequest.md create mode 100755 docs/models/shared/createmoderationresponse.md create mode 100755 docs/models/shared/createmoderationresponseresults.md create mode 100755 docs/models/shared/createmoderationresponseresultscategories.md create mode 100755 docs/models/shared/createmoderationresponseresultscategoryscores.md create mode 100755 docs/models/shared/createsearchrequest.md create mode 100755 docs/models/shared/createsearchresponse.md create mode 100755 docs/models/shared/createsearchresponsedata.md create mode 100755 docs/models/shared/createtranscriptionrequest.md create mode 100755 docs/models/shared/createtranscriptionrequestfile.md create mode 100755 docs/models/shared/createtranscriptionresponse.md create mode 100755 docs/models/shared/createtranslationrequest.md create mode 100755 docs/models/shared/createtranslationrequestfile.md create mode 100755 
docs/models/shared/createtranslationresponse.md create mode 100755 docs/models/shared/deletefileresponse.md create mode 100755 docs/models/shared/deletemodelresponse.md create mode 100755 docs/models/shared/engine.md create mode 100755 docs/models/shared/finetune.md create mode 100755 docs/models/shared/finetuneevent.md create mode 100755 docs/models/shared/finetunehyperparams.md create mode 100755 docs/models/shared/imagesresponse.md create mode 100755 docs/models/shared/imagesresponsedata.md create mode 100755 docs/models/shared/listenginesresponse.md create mode 100755 docs/models/shared/listfilesresponse.md create mode 100755 docs/models/shared/listfinetuneeventsresponse.md create mode 100755 docs/models/shared/listfinetunesresponse.md create mode 100755 docs/models/shared/listmodelsresponse.md create mode 100755 docs/models/shared/model.md create mode 100755 docs/models/shared/openaifile.md create mode 100755 docs/models/shared/openaifilestatusdetails.md delete mode 100755 docs/openai/README.md rename docs/{ => sdks}/gpt/README.md (100%) create mode 100755 docs/sdks/openai/README.md diff --git a/README.md b/README.md index 7691dac..c8dbc88 100755 --- a/README.md +++ b/README.md @@ -59,16 +59,16 @@ sdk.openAI.cancelFineTune({ ## Available Resources and Operations -### [openAI](docs/openai/README.md) +### [openAI](docs/sdks/openai/README.md) -* [cancelFineTune](docs/openai/README.md#cancelfinetune) - Immediately cancel a fine-tune job. +* [cancelFineTune](docs/sdks/openai/README.md#cancelfinetune) - Immediately cancel a fine-tune job. -* [~~createAnswer~~](docs/openai/README.md#createanswer) - Answers the specified question using the provided documents and examples. +* [~~createAnswer~~](docs/sdks/openai/README.md#createanswer) - Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. 
The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). :warning: **Deprecated** -* [createChatCompletion](docs/openai/README.md#createchatcompletion) - Creates a completion for the chat message -* [~~createClassification~~](docs/openai/README.md#createclassification) - Classifies the specified `query` using provided examples. +* [createChatCompletion](docs/sdks/openai/README.md#createchatcompletion) - Creates a completion for the chat message +* [~~createClassification~~](docs/sdks/openai/README.md#createclassification) - Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples @@ -78,46 +78,46 @@ are combined with the query to construct a prompt to produce the final label via Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. :warning: **Deprecated** -* [createCompletion](docs/openai/README.md#createcompletion) - Creates a completion for the provided prompt and parameters -* [createEdit](docs/openai/README.md#createedit) - Creates a new edit for the provided input, instruction, and parameters. -* [createEmbedding](docs/openai/README.md#createembedding) - Creates an embedding vector representing the input text. -* [createFile](docs/openai/README.md#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. 
+* [createCompletion](docs/sdks/openai/README.md#createcompletion) - Creates a completion for the provided prompt and parameters +* [createEdit](docs/sdks/openai/README.md#createedit) - Creates a new edit for the provided input, instruction, and parameters. +* [createEmbedding](docs/sdks/openai/README.md#createembedding) - Creates an embedding vector representing the input text. +* [createFile](docs/sdks/openai/README.md#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. -* [createFineTune](docs/openai/README.md#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. +* [createFineTune](docs/sdks/openai/README.md#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning) -* [createImage](docs/openai/README.md#createimage) - Creates an image given a prompt. -* [createImageEdit](docs/openai/README.md#createimageedit) - Creates an edited or extended image given an original image and a prompt. -* [createImageVariation](docs/openai/README.md#createimagevariation) - Creates a variation of a given image. -* [createModeration](docs/openai/README.md#createmoderation) - Classifies if text violates OpenAI's Content Policy -* [~~createSearch~~](docs/openai/README.md#createsearch) - The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. +* [createImage](docs/sdks/openai/README.md#createimage) - Creates an image given a prompt. 
+* [createImageEdit](docs/sdks/openai/README.md#createimageedit) - Creates an edited or extended image given an original image and a prompt. +* [createImageVariation](docs/sdks/openai/README.md#createimagevariation) - Creates a variation of a given image. +* [createModeration](docs/sdks/openai/README.md#createmoderation) - Classifies if text violates OpenAI's Content Policy +* [~~createSearch~~](docs/sdks/openai/README.md#createsearch) - The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. :warning: **Deprecated** -* [createTranscription](docs/openai/README.md#createtranscription) - Transcribes audio into the input language. -* [createTranslation](docs/openai/README.md#createtranslation) - Translates audio into into English. -* [deleteFile](docs/openai/README.md#deletefile) - Delete a file. -* [deleteModel](docs/openai/README.md#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization. -* [downloadFile](docs/openai/README.md#downloadfile) - Returns the contents of the specified file -* [~~listEngines~~](docs/openai/README.md#listengines) - Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. 
:warning: **Deprecated** -* [listFiles](docs/openai/README.md#listfiles) - Returns a list of files that belong to the user's organization. -* [listFineTuneEvents](docs/openai/README.md#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. - -* [listFineTunes](docs/openai/README.md#listfinetunes) - List your organization's fine-tuning jobs - -* [listModels](docs/openai/README.md#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. -* [~~retrieveEngine~~](docs/openai/README.md#retrieveengine) - Retrieves a model instance, providing basic information about it such as the owner and availability. :warning: **Deprecated** -* [retrieveFile](docs/openai/README.md#retrievefile) - Returns information about a specific file. -* [retrieveFineTune](docs/openai/README.md#retrievefinetune) - Gets info about the fine-tune job. +* [createTranscription](docs/sdks/openai/README.md#createtranscription) - Transcribes audio into the input language. +* [createTranslation](docs/sdks/openai/README.md#createtranslation) - Translates audio into into English. +* [deleteFile](docs/sdks/openai/README.md#deletefile) - Delete a file. +* [deleteModel](docs/sdks/openai/README.md#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization. +* [downloadFile](docs/sdks/openai/README.md#downloadfile) - Returns the contents of the specified file +* [~~listEngines~~](docs/sdks/openai/README.md#listengines) - Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. :warning: **Deprecated** +* [listFiles](docs/sdks/openai/README.md#listfiles) - Returns a list of files that belong to the user's organization. +* [listFineTuneEvents](docs/sdks/openai/README.md#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. 
+ +* [listFineTunes](docs/sdks/openai/README.md#listfinetunes) - List your organization's fine-tuning jobs + +* [listModels](docs/sdks/openai/README.md#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. +* [~~retrieveEngine~~](docs/sdks/openai/README.md#retrieveengine) - Retrieves a model instance, providing basic information about it such as the owner and availability. :warning: **Deprecated** +* [retrieveFile](docs/sdks/openai/README.md#retrievefile) - Returns information about a specific file. +* [retrieveFineTune](docs/sdks/openai/README.md#retrievefinetune) - Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning) -* [retrieveModel](docs/openai/README.md#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. +* [retrieveModel](docs/sdks/openai/README.md#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. ### SDK Generated by [Speakeasy](https://docs.speakeasyapi.dev/docs/using-speakeasy/client-sdks) diff --git a/RELEASES.md b/RELEASES.md index 24deca2..8c22a12 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -156,4 +156,12 @@ Based on: - OpenAPI Doc 1.2.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.45.2 (2.37.2) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v1.10.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.10.0 - . \ No newline at end of file +- [NPM v1.10.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.10.0 - . 
+ +## 2023-06-10 01:09:14 +### Changes +Based on: +- OpenAPI Doc 1.2.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.47.0 (2.39.0) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v1.11.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.11.0 - . \ No newline at end of file diff --git a/docs/models/operations/cancelfinetunerequest.md b/docs/models/operations/cancelfinetunerequest.md new file mode 100755 index 0000000..4197899 --- /dev/null +++ b/docs/models/operations/cancelfinetunerequest.md @@ -0,0 +1,8 @@ +# CancelFineTuneRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | +| `fineTuneId` | *string* | :heavy_check_mark: | The ID of the fine-tune job to cancel
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | \ No newline at end of file diff --git a/docs/models/operations/cancelfinetuneresponse.md b/docs/models/operations/cancelfinetuneresponse.md new file mode 100755 index 0000000..72af593 --- /dev/null +++ b/docs/models/operations/cancelfinetuneresponse.md @@ -0,0 +1,11 @@ +# CancelFineTuneResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createanswerresponse.md b/docs/models/operations/createanswerresponse.md new file mode 100755 index 0000000..54cf7fa --- /dev/null +++ b/docs/models/operations/createanswerresponse.md @@ -0,0 +1,11 @@ +# CreateAnswerResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `createAnswerResponse` | [shared.CreateAnswerResponse](../../models/shared/createanswerresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/operations/createchatcompletionresponse.md b/docs/models/operations/createchatcompletionresponse.md new file mode 100755 index 0000000..abd2419 --- /dev/null +++ b/docs/models/operations/createchatcompletionresponse.md @@ -0,0 +1,11 @@ +# CreateChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `createChatCompletionResponse` | [shared.CreateChatCompletionResponse](../../models/shared/createchatcompletionresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createclassificationresponse.md b/docs/models/operations/createclassificationresponse.md new file mode 100755 index 0000000..7f08699 --- /dev/null +++ b/docs/models/operations/createclassificationresponse.md @@ -0,0 +1,11 @@ +# CreateClassificationResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `createClassificationResponse` | 
[shared.CreateClassificationResponse](../../models/shared/createclassificationresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createcompletionresponse.md b/docs/models/operations/createcompletionresponse.md new file mode 100755 index 0000000..2bcda50 --- /dev/null +++ b/docs/models/operations/createcompletionresponse.md @@ -0,0 +1,11 @@ +# CreateCompletionResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `createCompletionResponse` | [shared.CreateCompletionResponse](../../models/shared/createcompletionresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createeditresponse.md b/docs/models/operations/createeditresponse.md new file mode 100755 index 0000000..38f3429 --- /dev/null +++ b/docs/models/operations/createeditresponse.md @@ -0,0 +1,11 @@ +# CreateEditResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| 
`contentType` | *string* | :heavy_check_mark: | N/A | +| `createEditResponse` | [shared.CreateEditResponse](../../models/shared/createeditresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createembeddingresponse.md b/docs/models/operations/createembeddingresponse.md new file mode 100755 index 0000000..bae0066 --- /dev/null +++ b/docs/models/operations/createembeddingresponse.md @@ -0,0 +1,11 @@ +# CreateEmbeddingResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `createEmbeddingResponse` | [shared.CreateEmbeddingResponse](../../models/shared/createembeddingresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createfileresponse.md b/docs/models/operations/createfileresponse.md new file mode 100755 index 0000000..4be2e54 --- /dev/null +++ b/docs/models/operations/createfileresponse.md @@ -0,0 +1,11 @@ +# CreateFileResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `contentType` | 
*string* | :heavy_check_mark: | N/A | +| `openAIFile` | [shared.OpenAIFile](../../models/shared/openaifile.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createfinetuneresponse.md b/docs/models/operations/createfinetuneresponse.md new file mode 100755 index 0000000..385bf3f --- /dev/null +++ b/docs/models/operations/createfinetuneresponse.md @@ -0,0 +1,11 @@ +# CreateFineTuneResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createimageeditresponse.md b/docs/models/operations/createimageeditresponse.md new file mode 100755 index 0000000..8c7a6f8 --- /dev/null +++ b/docs/models/operations/createimageeditresponse.md @@ -0,0 +1,11 @@ +# CreateImageEditResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `imagesResponse` | [shared.ImagesResponse](../../models/shared/imagesresponse.md) | :heavy_minus_sign: | 
OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createimageresponse.md b/docs/models/operations/createimageresponse.md new file mode 100755 index 0000000..2abaad8 --- /dev/null +++ b/docs/models/operations/createimageresponse.md @@ -0,0 +1,11 @@ +# CreateImageResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `imagesResponse` | [shared.ImagesResponse](../../models/shared/imagesresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createimagevariationresponse.md b/docs/models/operations/createimagevariationresponse.md new file mode 100755 index 0000000..f5d958e --- /dev/null +++ b/docs/models/operations/createimagevariationresponse.md @@ -0,0 +1,11 @@ +# CreateImageVariationResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `imagesResponse` | [shared.ImagesResponse](../../models/shared/imagesresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | 
[AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createmoderationresponse.md b/docs/models/operations/createmoderationresponse.md new file mode 100755 index 0000000..d04c6fb --- /dev/null +++ b/docs/models/operations/createmoderationresponse.md @@ -0,0 +1,11 @@ +# CreateModerationResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `createModerationResponse` | [shared.CreateModerationResponse](../../models/shared/createmoderationresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createsearchrequest.md b/docs/models/operations/createsearchrequest.md new file mode 100755 index 0000000..a7ad86a --- /dev/null +++ b/docs/models/operations/createsearchrequest.md @@ -0,0 +1,9 @@ +# CreateSearchRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------- | +| `createSearchRequest` | [shared.CreateSearchRequest](../../models/shared/createsearchrequest.md) | :heavy_check_mark: | N/A | | +| `engineId` | *string* | :heavy_check_mark: | The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`. | davinci | \ No newline at end of file diff --git a/docs/models/operations/createsearchresponse.md b/docs/models/operations/createsearchresponse.md new file mode 100755 index 0000000..5306a56 --- /dev/null +++ b/docs/models/operations/createsearchresponse.md @@ -0,0 +1,11 @@ +# CreateSearchResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `createSearchResponse` | [shared.CreateSearchResponse](../../models/shared/createsearchresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createtranscriptionresponse.md b/docs/models/operations/createtranscriptionresponse.md new file mode 100755 index 0000000..99c673c --- /dev/null +++ b/docs/models/operations/createtranscriptionresponse.md @@ -0,0 +1,11 @@ +# CreateTranscriptionResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `createTranscriptionResponse` | [shared.CreateTranscriptionResponse](../../models/shared/createtranscriptionresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createtranslationresponse.md b/docs/models/operations/createtranslationresponse.md new file mode 100755 index 0000000..2818abd --- /dev/null +++ b/docs/models/operations/createtranslationresponse.md @@ -0,0 +1,11 @@ +# CreateTranslationResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `createTranslationResponse` | [shared.CreateTranslationResponse](../../models/shared/createtranslationresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/deletefilerequest.md b/docs/models/operations/deletefilerequest.md new file mode 100755 index 0000000..476343d --- /dev/null +++ b/docs/models/operations/deletefilerequest.md @@ -0,0 +1,8 @@ +# DeleteFileRequest + + +## Fields + +| Field | Type | Required | Description | +| 
------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `fileId` | *string* | :heavy_check_mark: | The ID of the file to use for this request | \ No newline at end of file diff --git a/docs/models/operations/deletefileresponse.md b/docs/models/operations/deletefileresponse.md new file mode 100755 index 0000000..2141bfd --- /dev/null +++ b/docs/models/operations/deletefileresponse.md @@ -0,0 +1,11 @@ +# DeleteFileResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `deleteFileResponse` | [shared.DeleteFileResponse](../../models/shared/deletefileresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/deletemodelrequest.md b/docs/models/operations/deletemodelrequest.md new file mode 100755 index 0000000..ce2c4d5 --- /dev/null +++ b/docs/models/operations/deletemodelrequest.md @@ -0,0 +1,8 @@ +# DeleteModelRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `model` | *string* | :heavy_check_mark: | The model to delete | curie:ft-acmeco-2021-03-03-21-44-20 | \ No newline at end of file diff --git a/docs/models/operations/deletemodelresponse.md 
b/docs/models/operations/deletemodelresponse.md new file mode 100755 index 0000000..53e22d2 --- /dev/null +++ b/docs/models/operations/deletemodelresponse.md @@ -0,0 +1,11 @@ +# DeleteModelResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `deleteModelResponse` | [shared.DeleteModelResponse](../../models/shared/deletemodelresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/downloadfilerequest.md b/docs/models/operations/downloadfilerequest.md new file mode 100755 index 0000000..fb50706 --- /dev/null +++ b/docs/models/operations/downloadfilerequest.md @@ -0,0 +1,8 @@ +# DownloadFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `fileId` | *string* | :heavy_check_mark: | The ID of the file to use for this request | \ No newline at end of file diff --git a/docs/models/operations/downloadfileresponse.md b/docs/models/operations/downloadfileresponse.md new file mode 100755 index 0000000..ecf092c --- /dev/null +++ b/docs/models/operations/downloadfileresponse.md @@ -0,0 +1,11 @@ +# DownloadFileResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | 
-------------------------------------------------------- | -------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | +| `downloadFile200ApplicationJSONString` | *string* | :heavy_minus_sign: | OK | \ No newline at end of file diff --git a/docs/models/operations/listenginesresponse.md b/docs/models/operations/listenginesresponse.md new file mode 100755 index 0000000..c81d050 --- /dev/null +++ b/docs/models/operations/listenginesresponse.md @@ -0,0 +1,11 @@ +# ListEnginesResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `listEnginesResponse` | [shared.ListEnginesResponse](../../models/shared/listenginesresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/listfilesresponse.md b/docs/models/operations/listfilesresponse.md new file mode 100755 index 0000000..5807562 --- /dev/null +++ b/docs/models/operations/listfilesresponse.md @@ -0,0 +1,11 @@ +# ListFilesResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | 
-------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `listFilesResponse` | [shared.ListFilesResponse](../../models/shared/listfilesresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/listfinetuneeventsrequest.md b/docs/models/operations/listfinetuneeventsrequest.md new file mode 100755 index 0000000..35fa550 --- /dev/null +++ b/docs/models/operations/listfinetuneeventsrequest.md @@ -0,0 +1,9 @@ +# ListFineTuneEventsRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `fineTuneId` | *string* | :heavy_check_mark: | The ID of the fine-tune job to get events for.
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | +| `stream` | *boolean* | :heavy_minus_sign: | Whether to stream events for the fine-tune job. If set to true,
events will be sent as data-only
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
as they become available. The stream will terminate with a
`data: [DONE]` message when the job is finished (succeeded, cancelled,
or failed).

If set to false, only events generated so far will be returned.
| | \ No newline at end of file diff --git a/docs/models/operations/listfinetuneeventsresponse.md b/docs/models/operations/listfinetuneeventsresponse.md new file mode 100755 index 0000000..f291b8d --- /dev/null +++ b/docs/models/operations/listfinetuneeventsresponse.md @@ -0,0 +1,11 @@ +# ListFineTuneEventsResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `listFineTuneEventsResponse` | [shared.ListFineTuneEventsResponse](../../models/shared/listfinetuneeventsresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/listfinetunesresponse.md b/docs/models/operations/listfinetunesresponse.md new file mode 100755 index 0000000..5351fd6 --- /dev/null +++ b/docs/models/operations/listfinetunesresponse.md @@ -0,0 +1,11 @@ +# ListFineTunesResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `listFineTunesResponse` | [shared.ListFineTunesResponse](../../models/shared/listfinetunesresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | 
:heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/listmodelsresponse.md b/docs/models/operations/listmodelsresponse.md new file mode 100755 index 0000000..2bd95ce --- /dev/null +++ b/docs/models/operations/listmodelsresponse.md @@ -0,0 +1,11 @@ +# ListModelsResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `listModelsResponse` | [shared.ListModelsResponse](../../models/shared/listmodelsresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/retrieveenginerequest.md b/docs/models/operations/retrieveenginerequest.md new file mode 100755 index 0000000..1f9e3a3 --- /dev/null +++ b/docs/models/operations/retrieveenginerequest.md @@ -0,0 +1,8 @@ +# RetrieveEngineRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `engineId` | *string* | :heavy_check_mark: | The ID of the engine to use for this request
| davinci | \ No newline at end of file diff --git a/docs/models/operations/retrieveengineresponse.md b/docs/models/operations/retrieveengineresponse.md new file mode 100755 index 0000000..a26b597 --- /dev/null +++ b/docs/models/operations/retrieveengineresponse.md @@ -0,0 +1,11 @@ +# RetrieveEngineResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `engine` | [shared.Engine](../../models/shared/engine.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/retrievefilerequest.md b/docs/models/operations/retrievefilerequest.md new file mode 100755 index 0000000..6bf8992 --- /dev/null +++ b/docs/models/operations/retrievefilerequest.md @@ -0,0 +1,8 @@ +# RetrieveFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `fileId` | *string* | :heavy_check_mark: | The ID of the file to use for this request | \ No newline at end of file diff --git a/docs/models/operations/retrievefileresponse.md b/docs/models/operations/retrievefileresponse.md new file mode 100755 index 0000000..c8496ef --- /dev/null +++ b/docs/models/operations/retrievefileresponse.md @@ -0,0 +1,11 @@ +# RetrieveFileResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | 
-------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `openAIFile` | [shared.OpenAIFile](../../models/shared/openaifile.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/retrievefinetunerequest.md b/docs/models/operations/retrievefinetunerequest.md new file mode 100755 index 0000000..56f6349 --- /dev/null +++ b/docs/models/operations/retrievefinetunerequest.md @@ -0,0 +1,8 @@ +# RetrieveFineTuneRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `fineTuneId` | *string* | :heavy_check_mark: | The ID of the fine-tune job
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | \ No newline at end of file diff --git a/docs/models/operations/retrievefinetuneresponse.md b/docs/models/operations/retrievefinetuneresponse.md new file mode 100755 index 0000000..7824495 --- /dev/null +++ b/docs/models/operations/retrievefinetuneresponse.md @@ -0,0 +1,11 @@ +# RetrieveFineTuneResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/retrievemodelrequest.md b/docs/models/operations/retrievemodelrequest.md new file mode 100755 index 0000000..b52c4f8 --- /dev/null +++ b/docs/models/operations/retrievemodelrequest.md @@ -0,0 +1,8 @@ +# RetrieveModelRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | +| `model` | *string* | :heavy_check_mark: | The ID of the model to use for this request | text-davinci-001 | \ No newline at end of file diff --git a/docs/models/operations/retrievemodelresponse.md b/docs/models/operations/retrievemodelresponse.md new file mode 100755 index 0000000..5eedb5f --- /dev/null +++ b/docs/models/operations/retrievemodelresponse.md @@ -0,0 +1,11 @@ +# RetrieveModelResponse + + +## Fields + +| Field | Type | Required | 
Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `model` | [shared.Model](../../models/shared/model.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/chatcompletionrequestmessage.md b/docs/models/shared/chatcompletionrequestmessage.md new file mode 100755 index 0000000..a4e7c3e --- /dev/null +++ b/docs/models/shared/chatcompletionrequestmessage.md @@ -0,0 +1,10 @@ +# ChatCompletionRequestMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | +| `content` | *string* | :heavy_check_mark: | The contents of the message | +| `name` | *string* | :heavy_minus_sign: | The name of the user in a multi-user chat | +| `role` | [ChatCompletionRequestMessageRole](../../models/shared/chatcompletionrequestmessagerole.md) | :heavy_check_mark: | The role of the author of this message. 
| \ No newline at end of file diff --git a/docs/models/shared/chatcompletionrequestmessagerole.md b/docs/models/shared/chatcompletionrequestmessagerole.md new file mode 100755 index 0000000..c594a95 --- /dev/null +++ b/docs/models/shared/chatcompletionrequestmessagerole.md @@ -0,0 +1,12 @@ +# ChatCompletionRequestMessageRole + +The role of the author of this message. + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `System` | system | +| `User` | user | +| `Assistant` | assistant | \ No newline at end of file diff --git a/docs/models/shared/chatcompletionresponsemessage.md b/docs/models/shared/chatcompletionresponsemessage.md new file mode 100755 index 0000000..bbf084b --- /dev/null +++ b/docs/models/shared/chatcompletionresponsemessage.md @@ -0,0 +1,9 @@ +# ChatCompletionResponseMessage + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | +| `content` | *string* | :heavy_check_mark: | The contents of the message | +| `role` | [ChatCompletionResponseMessageRole](../../models/shared/chatcompletionresponsemessagerole.md) | :heavy_check_mark: | The role of the author of this message. | \ No newline at end of file diff --git a/docs/models/shared/chatcompletionresponsemessagerole.md b/docs/models/shared/chatcompletionresponsemessagerole.md new file mode 100755 index 0000000..b6e1f69 --- /dev/null +++ b/docs/models/shared/chatcompletionresponsemessagerole.md @@ -0,0 +1,12 @@ +# ChatCompletionResponseMessageRole + +The role of the author of this message. 
+ + +## Values + +| Name | Value | +| ----------- | ----------- | +| `System` | system | +| `User` | user | +| `Assistant` | assistant | \ No newline at end of file diff --git a/docs/models/shared/createanswerrequest.md b/docs/models/shared/createanswerrequest.md new file mode 100755 index 0000000..5e0ef20 --- /dev/null +++ b/docs/models/shared/createanswerrequest.md @@ -0,0 +1,25 @@ +# CreateAnswerRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `documents` | *string*[] | :heavy_minus_sign: | List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples.

You should specify either `documents` or a `file`, but not both.
| | +| `examples` | *string*[][] | :heavy_check_mark: | List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. | | +| `examplesContext` | *string* | :heavy_check_mark: | A text snippet containing the contextual information used to generate the answers for the `examples` you provide. | Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. border. | +| `expand` | *any*[] | :heavy_minus_sign: | If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. | | +| `file` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose.

You should specify either `documents` or a `file`, but not both.
| | +| `logitBias` | *any* | :heavy_minus_sign: | N/A | | +| `logprobs` | *number* | :heavy_minus_sign: | Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.

When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
| | +| `maxRerank` | *number* | :heavy_minus_sign: | The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. | | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens allowed for the generated answer | | +| `model` | *string* | :heavy_check_mark: | ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. | | +| `n` | *number* | :heavy_minus_sign: | How many answers to generate for each question. | | +| `question` | *string* | :heavy_check_mark: | Question to get answered. | What is the capital of Japan? | +| `returnMetadata` | *any* | :heavy_minus_sign: | N/A | | +| `returnPrompt` | *boolean* | :heavy_minus_sign: | If set to `true`, the returned JSON will include a "prompt" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. | | +| `searchModel` | *string* | :heavy_minus_sign: | ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. | | +| `stop` | *any* | :heavy_minus_sign: | completions_stop_description | | +| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
| | +| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/shared/createanswerresponse.md b/docs/models/shared/createanswerresponse.md new file mode 100755 index 0000000..b1536f9 --- /dev/null +++ b/docs/models/shared/createanswerresponse.md @@ -0,0 +1,15 @@ +# CreateAnswerResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | +| `answers` | *string*[] | :heavy_minus_sign: | N/A | +| `completion` | *string* | :heavy_minus_sign: | N/A | +| `model` | *string* | :heavy_minus_sign: | N/A | +| `object` | *string* | :heavy_minus_sign: | N/A | +| `searchModel` | *string* | :heavy_minus_sign: | N/A | +| `selectedDocuments` | [CreateAnswerResponseSelectedDocuments](../../models/shared/createanswerresponseselecteddocuments.md)[] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createanswerresponseselecteddocuments.md b/docs/models/shared/createanswerresponseselecteddocuments.md new file mode 100755 index 0000000..529d040 --- /dev/null +++ b/docs/models/shared/createanswerresponseselecteddocuments.md @@ -0,0 +1,9 @@ +# CreateAnswerResponseSelectedDocuments + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `document` | *number* | :heavy_minus_sign: | N/A | +| `text` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequest.md b/docs/models/shared/createchatcompletionrequest.md new file 
mode 100755 index 0000000..1f2af27 --- /dev/null +++ b/docs/models/shared/createchatcompletionrequest.md @@ -0,0 +1,19 @@ +# CreateChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `frequencyPenalty` | *number* | :heavy_minus_sign: | completions_frequency_penalty_description | | +| `logitBias` | [CreateChatCompletionRequestLogitBias](../../models/shared/createchatcompletionrequestlogitbias.md) | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
| | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
| | +| `messages` | [ChatCompletionRequestMessage](../../models/shared/chatcompletionrequestmessage.md)[] | :heavy_check_mark: | The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction). | | +| `model` | *string* | :heavy_check_mark: | ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported. | | +| `n` | *number* | :heavy_minus_sign: | How many chat completion choices to generate for each input message. | 1 | +| `presencePenalty` | *number* | :heavy_minus_sign: | completions_presence_penalty_description | | +| `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens.
| | +| `stream` | *boolean* | :heavy_minus_sign: | If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
| | +| `temperature` | *number* | :heavy_minus_sign: | completions_temperature_description | 1 | +| `topP` | *number* | :heavy_minus_sign: | completions_top_p_description | 1 | +| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequestlogitbias.md b/docs/models/shared/createchatcompletionrequestlogitbias.md new file mode 100755 index 0000000..6ad8754 --- /dev/null +++ b/docs/models/shared/createchatcompletionrequestlogitbias.md @@ -0,0 +1,12 @@ +# CreateChatCompletionRequestLogitBias + +Modify the likelihood of specified tokens appearing in the completion. + +Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 
+ + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionresponse.md b/docs/models/shared/createchatcompletionresponse.md new file mode 100755 index 0000000..14b4646 --- /dev/null +++ b/docs/models/shared/createchatcompletionresponse.md @@ -0,0 +1,15 @@ +# CreateChatCompletionResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | +| `choices` | [CreateChatCompletionResponseChoices](../../models/shared/createchatcompletionresponsechoices.md)[] | :heavy_check_mark: | N/A | +| `created` | *number* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_check_mark: | N/A | +| `model` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | +| `usage` | [CreateChatCompletionResponseUsage](../../models/shared/createchatcompletionresponseusage.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionresponsechoices.md b/docs/models/shared/createchatcompletionresponsechoices.md new file mode 100755 index 0000000..96a3ded --- /dev/null +++ b/docs/models/shared/createchatcompletionresponsechoices.md @@ -0,0 +1,10 @@ +# CreateChatCompletionResponseChoices + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | +| `finishReason` | *string* | :heavy_minus_sign: | N/A | +| `index` | *number* | :heavy_minus_sign: | N/A | +| `message` | [ChatCompletionResponseMessage](../../models/shared/chatcompletionresponsemessage.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionresponseusage.md b/docs/models/shared/createchatcompletionresponseusage.md new file mode 100755 index 0000000..9aaef97 --- /dev/null +++ b/docs/models/shared/createchatcompletionresponseusage.md @@ -0,0 +1,10 @@ +# CreateChatCompletionResponseUsage + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `completionTokens` | *number* | :heavy_check_mark: | N/A | +| `promptTokens` | *number* | :heavy_check_mark: | N/A | +| `totalTokens` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createclassificationrequest.md b/docs/models/shared/createclassificationrequest.md new file mode 100755 index 0000000..ae408da --- /dev/null +++ b/docs/models/shared/createclassificationrequest.md @@ -0,0 +1,21 @@ +# CreateClassificationRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `examples` | *string*[][] | :heavy_minus_sign: | A list of examples with labels, in the following format:

`[["The movie is so interesting.", "Positive"], ["It is quite boring.", "Negative"], ...]`

All the label strings will be normalized to be capitalized.

You should specify either `examples` or `file`, but not both.
| | +| `expand` | *any* | :heavy_minus_sign: | N/A | | +| `file` | *string* | :heavy_minus_sign: | The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose.

You should specify either `examples` or `file`, but not both.
| | +| `labels` | *string*[] | :heavy_minus_sign: | The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized. | | +| `logitBias` | *any* | :heavy_minus_sign: | N/A | | +| `logprobs` | *any* | :heavy_minus_sign: | N/A | | +| `maxExamples` | *number* | :heavy_minus_sign: | The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. | | +| `model` | *any* | :heavy_check_mark: | N/A | | +| `query` | *string* | :heavy_check_mark: | Query to be classified. | The plot is not very attractive. | +| `returnMetadata` | *any* | :heavy_minus_sign: | N/A | | +| `returnPrompt` | *any* | :heavy_minus_sign: | N/A | | +| `searchModel` | *any* | :heavy_minus_sign: | N/A | | +| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
| 0 | +| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/shared/createclassificationresponse.md b/docs/models/shared/createclassificationresponse.md new file mode 100755 index 0000000..409671d --- /dev/null +++ b/docs/models/shared/createclassificationresponse.md @@ -0,0 +1,15 @@ +# CreateClassificationResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | +| `completion` | *string* | :heavy_minus_sign: | N/A | +| `label` | *string* | :heavy_minus_sign: | N/A | +| `model` | *string* | :heavy_minus_sign: | N/A | +| `object` | *string* | :heavy_minus_sign: | N/A | +| `searchModel` | *string* | :heavy_minus_sign: | N/A | +| `selectedExamples` | [CreateClassificationResponseSelectedExamples](../../models/shared/createclassificationresponseselectedexamples.md)[] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createclassificationresponseselectedexamples.md b/docs/models/shared/createclassificationresponseselectedexamples.md new file mode 100755 index 0000000..acf8f7d --- /dev/null +++ b/docs/models/shared/createclassificationresponseselectedexamples.md @@ -0,0 +1,10 @@ +# CreateClassificationResponseSelectedExamples + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `document` | *number* | :heavy_minus_sign: | N/A | +| `label` | *string* | :heavy_minus_sign: | N/A | +| `text` | *string* | 
:heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createcompletionrequest.md b/docs/models/shared/createcompletionrequest.md new file mode 100755 index 0000000..0554439 --- /dev/null +++ b/docs/models/shared/createcompletionrequest.md @@ -0,0 +1,23 @@ +# CreateCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `bestOf` | *number* | :heavy_minus_sign: | Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.

When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.

**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
| | +| `echo` | *boolean* | :heavy_minus_sign: | Echo back the prompt in addition to the completion
| | +| `frequencyPenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | +| `logitBias` | [CreateCompletionRequestLogitBias](../../models/shared/createcompletionrequestlogitbias.md) | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.

As an example, you can pass `{"50256": -100}` to prevent the <\|endoftext\|> token from being generated.
| | +| `logprobs` | *number* | :heavy_minus_sign: | Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.
| | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the completion.

The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
| 16 | +| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. | | +| `n` | *number* | :heavy_minus_sign: | How many completions to generate for each prompt.

**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
| 1 | +| `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | +| `prompt` | *any* | :heavy_minus_sign: | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.

Note that <\|endoftext\|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
| | +| `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
| | +| `stream` | *boolean* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
| | +| `suffix` | *string* | :heavy_minus_sign: | The suffix that comes after a completion of inserted text. | test. | +| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

We generally recommend altering this or `top_p` but not both.
| 1 | +| `topP` | *number* | :heavy_minus_sign: | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.

We generally recommend altering this or `temperature` but not both.
| 1 | +| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createcompletionrequestlogitbias.md b/docs/models/shared/createcompletionrequestlogitbias.md new file mode 100755 index 0000000..f69d6bf --- /dev/null +++ b/docs/models/shared/createcompletionrequestlogitbias.md @@ -0,0 +1,14 @@ +# CreateCompletionRequestLogitBias + +Modify the likelihood of specified tokens appearing in the completion. + +Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + +As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. 
+ + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponse.md b/docs/models/shared/createcompletionresponse.md new file mode 100755 index 0000000..7607489 --- /dev/null +++ b/docs/models/shared/createcompletionresponse.md @@ -0,0 +1,15 @@ +# CreateCompletionResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | +| `choices` | [CreateCompletionResponseChoices](../../models/shared/createcompletionresponsechoices.md)[] | :heavy_check_mark: | N/A | +| `created` | *number* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_check_mark: | N/A | +| `model` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | +| `usage` | [CreateCompletionResponseUsage](../../models/shared/createcompletionresponseusage.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoices.md b/docs/models/shared/createcompletionresponsechoices.md new file mode 100755 index 0000000..2bddd11 --- /dev/null +++ b/docs/models/shared/createcompletionresponsechoices.md @@ -0,0 +1,11 @@ +# CreateCompletionResponseChoices + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | +| `finishReason` | *string* | :heavy_minus_sign: | N/A | +| `index` | *number* | :heavy_minus_sign: | N/A | +| `logprobs` | [CreateCompletionResponseChoicesLogprobs](../../models/shared/createcompletionresponsechoiceslogprobs.md) | :heavy_minus_sign: | N/A | +| `text` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoiceslogprobs.md b/docs/models/shared/createcompletionresponsechoiceslogprobs.md new file mode 100755 index 0000000..c9f707b --- /dev/null +++ b/docs/models/shared/createcompletionresponsechoiceslogprobs.md @@ -0,0 +1,11 @@ +# CreateCompletionResponseChoicesLogprobs + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | +| `textOffset` | *number*[] | :heavy_minus_sign: | N/A | +| `tokenLogprobs` | *number*[] | :heavy_minus_sign: | N/A | +| `tokens` | *string*[] | :heavy_minus_sign: | N/A | +| `topLogprobs` | [CreateCompletionResponseChoicesLogprobsTopLogprobs](../../models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md)[] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md 
b/docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md new file mode 100755 index 0000000..7d341de --- /dev/null +++ b/docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md @@ -0,0 +1,7 @@ +# CreateCompletionResponseChoicesLogprobsTopLogprobs + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponseusage.md b/docs/models/shared/createcompletionresponseusage.md new file mode 100755 index 0000000..e124187 --- /dev/null +++ b/docs/models/shared/createcompletionresponseusage.md @@ -0,0 +1,10 @@ +# CreateCompletionResponseUsage + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `completionTokens` | *number* | :heavy_check_mark: | N/A | +| `promptTokens` | *number* | :heavy_check_mark: | N/A | +| `totalTokens` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createeditrequest.md b/docs/models/shared/createeditrequest.md new file mode 100755 index 0000000..09ea8b9 --- /dev/null +++ b/docs/models/shared/createeditrequest.md @@ -0,0 +1,13 @@ +# CreateEditRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | +| `input` | *string* | 
:heavy_minus_sign: | The input text to use as a starting point for the edit. | What day of the wek is it? | +| `instruction` | *string* | :heavy_check_mark: | The instruction that tells the model how to edit the prompt. | Fix the spelling mistakes. | +| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. | | +| `n` | *number* | :heavy_minus_sign: | How many edits to generate for the input and instruction. | 1 | +| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | 1 | +| `topP` | *number* | :heavy_minus_sign: | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | 1 | \ No newline at end of file diff --git a/docs/models/shared/createeditresponse.md b/docs/models/shared/createeditresponse.md new file mode 100755 index 0000000..b69aff7 --- /dev/null +++ b/docs/models/shared/createeditresponse.md @@ -0,0 +1,13 @@ +# CreateEditResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `choices` | [CreateEditResponseChoices](../../models/shared/createeditresponsechoices.md)[] | :heavy_check_mark: | N/A | +| `created` | *number* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | +| `usage` | [CreateEditResponseUsage](../../models/shared/createeditresponseusage.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoices.md b/docs/models/shared/createeditresponsechoices.md new file mode 100755 index 0000000..b59fe1d --- /dev/null +++ b/docs/models/shared/createeditresponsechoices.md @@ -0,0 +1,11 @@ +# 
CreateEditResponseChoices + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | +| `finishReason` | *string* | :heavy_minus_sign: | N/A | +| `index` | *number* | :heavy_minus_sign: | N/A | +| `logprobs` | [CreateEditResponseChoicesLogprobs](../../models/shared/createeditresponsechoiceslogprobs.md) | :heavy_minus_sign: | N/A | +| `text` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoiceslogprobs.md b/docs/models/shared/createeditresponsechoiceslogprobs.md new file mode 100755 index 0000000..a727a71 --- /dev/null +++ b/docs/models/shared/createeditresponsechoiceslogprobs.md @@ -0,0 +1,11 @@ +# CreateEditResponseChoicesLogprobs + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | +| `textOffset` | *number*[] | :heavy_minus_sign: | N/A | +| `tokenLogprobs` | *number*[] | :heavy_minus_sign: | N/A | +| `tokens` | *string*[] | :heavy_minus_sign: | N/A | +| `topLogprobs` | [CreateEditResponseChoicesLogprobsTopLogprobs](../../models/shared/createeditresponsechoiceslogprobstoplogprobs.md)[] | :heavy_minus_sign: | N/A | \ No newline at 
end of file diff --git a/docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md b/docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md new file mode 100755 index 0000000..a81f3c2 --- /dev/null +++ b/docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md @@ -0,0 +1,7 @@ +# CreateEditResponseChoicesLogprobsTopLogprobs + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/shared/createeditresponseusage.md b/docs/models/shared/createeditresponseusage.md new file mode 100755 index 0000000..00c1a10 --- /dev/null +++ b/docs/models/shared/createeditresponseusage.md @@ -0,0 +1,10 @@ +# CreateEditResponseUsage + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `completionTokens` | *number* | :heavy_check_mark: | N/A | +| `promptTokens` | *number* | :heavy_check_mark: | N/A | +| `totalTokens` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createembeddingrequest.md b/docs/models/shared/createembeddingrequest.md new file mode 100755 index 0000000..2545fe4 --- /dev/null +++ b/docs/models/shared/createembeddingrequest.md @@ -0,0 +1,10 @@ +# CreateEmbeddingRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `input` | *any* | :heavy_check_mark: | Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length.
| +| `model` | *any* | :heavy_check_mark: | N/A | +| `user` | *any* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createembeddingresponse.md b/docs/models/shared/createembeddingresponse.md new file mode 100755 index 0000000..56cc317 --- /dev/null +++ b/docs/models/shared/createembeddingresponse.md @@ -0,0 +1,13 @@ +# CreateEmbeddingResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| `data` | [CreateEmbeddingResponseData](../../models/shared/createembeddingresponsedata.md)[] | :heavy_check_mark: | N/A | +| `model` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | +| `usage` | [CreateEmbeddingResponseUsage](../../models/shared/createembeddingresponseusage.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createembeddingresponsedata.md b/docs/models/shared/createembeddingresponsedata.md new file mode 100755 index 0000000..e819885 --- /dev/null +++ b/docs/models/shared/createembeddingresponsedata.md @@ -0,0 +1,10 @@ +# CreateEmbeddingResponseData + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `embedding` | *number*[] | :heavy_check_mark: | N/A | +| `index` | *number* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createembeddingresponseusage.md b/docs/models/shared/createembeddingresponseusage.md new file mode 100755 index 0000000..d633cc4 --- /dev/null +++ 
b/docs/models/shared/createembeddingresponseusage.md @@ -0,0 +1,9 @@ +# CreateEmbeddingResponseUsage + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `promptTokens` | *number* | :heavy_check_mark: | N/A | +| `totalTokens` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createfilerequest.md b/docs/models/shared/createfilerequest.md new file mode 100755 index 0000000..4d20d62 --- /dev/null +++ b/docs/models/shared/createfilerequest.md @@ -0,0 +1,9 @@ +# CreateFileRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [CreateFileRequestFile](../../models/shared/createfilerequestfile.md) | :heavy_check_mark: | Name of the [JSON 
Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded.

If the `purpose` is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
| +| `purpose` | *string* | :heavy_check_mark: | The intended purpose of the uploaded documents.

Use "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
| \ No newline at end of file diff --git a/docs/models/shared/createfilerequestfile.md b/docs/models/shared/createfilerequestfile.md new file mode 100755 index 0000000..3bb80b5 --- /dev/null +++ b/docs/models/shared/createfilerequestfile.md @@ -0,0 +1,9 @@ +# CreateFileRequestFile + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `content` | *Uint8Array* | :heavy_check_mark: | N/A | +| `file` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createfinetunerequest.md b/docs/models/shared/createfinetunerequest.md new file mode 100755 index 0000000..ee7ab0c --- /dev/null +++ b/docs/models/shared/createfinetunerequest.md @@ -0,0 +1,19 @@ +# CreateFineTuneRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `batchSize` | *number* | :heavy_minus_sign: | The batch size to use for training. The batch size is the number of
training examples used to train a single forward and backward pass.

By default, the batch size will be dynamically configured to be
~0.2% of the number of examples in the training set, capped at 256 -
in general, we've found that larger batch sizes tend to work better
for larger datasets.
| | +| `classificationBetas` | *number*[] | :heavy_minus_sign: | If this is provided, we calculate F-beta scores at the specified
beta values. The F-beta score is a generalization of F-1 score.
This is only used for binary classification.

With a beta of 1 (i.e. the F-1 score), precision and recall are
given the same weight. A larger beta score puts more weight on
recall and less on precision. A smaller beta score puts more weight
on precision and less on recall.
| | +| `classificationNClasses` | *number* | :heavy_minus_sign: | The number of classes in a classification task.

This parameter is required for multiclass classification.
| | +| `classificationPositiveClass` | *string* | :heavy_minus_sign: | The positive class in binary classification.

This parameter is needed to generate precision, recall, and F1
metrics when doing binary classification.
| | +| `computeClassificationMetrics` | *boolean* | :heavy_minus_sign: | If set, we calculate classification-specific metrics such as accuracy
and F-1 score using the validation set at the end of every epoch.
These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model).

In order to compute classification metrics, you must provide a
`validation_file`. Additionally, you must
specify `classification_n_classes` for multiclass classification or
`classification_positive_class` for binary classification.
| | +| `learningRateMultiplier` | *number* | :heavy_minus_sign: | The learning rate multiplier to use for training.
The fine-tuning learning rate is the original learning rate used for
pretraining multiplied by this value.

By default, the learning rate multiplier is the 0.05, 0.1, or 0.2
depending on final `batch_size` (larger learning rates tend to
perform better with larger batch sizes). We recommend experimenting
with values in the range 0.02 to 0.2 to see what produces the best
results.
| | +| `model` | *string* | :heavy_minus_sign: | The name of the base model to fine-tune. You can select one of "ada",
"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21.
To learn more about these models, see the
[Models](https://platform.openai.com/docs/models) documentation.
| | +| `nEpochs` | *number* | :heavy_minus_sign: | The number of epochs to train the model for. An epoch refers to one
full cycle through the training dataset.
| | +| `promptLossWeight` | *number* | :heavy_minus_sign: | The weight to use for loss on the prompt tokens. This controls how
much the model tries to learn to generate the prompt (as compared
to the completion which always has a weight of 1.0), and can add
a stabilizing effect to training when completions are short.

If prompts are extremely long (relative to completions), it may make
sense to reduce this weight so as to avoid over-prioritizing
learning the prompt.
| | +| `suffix` | *string* | :heavy_minus_sign: | A string of up to 40 characters that will be added to your fine-tuned model name.

For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
| | +| `trainingFile` | *string* | :heavy_check_mark: | The ID of an uploaded file that contains training data.

See [upload file](/docs/api-reference/files/upload) for how to upload a file.

Your dataset must be formatted as a JSONL file, where each training
example is a JSON object with the keys "prompt" and "completion".
Additionally, you must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
| file-ajSREls59WBbvgSzJSVWxMCB | +| `validationFile` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains validation data.

If you provide this file, the data is used to generate validation
metrics periodically during fine-tuning. These metrics can be viewed in
the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model).
Your train and validation data should be mutually exclusive.

Your dataset must be formatted as a JSONL file, where each validation
example is a JSON object with the keys "prompt" and "completion".
Additionally, you must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
| file-XjSREls59WBbvgSzJSVWxMCa | \ No newline at end of file diff --git a/docs/models/shared/createimageeditrequest.md b/docs/models/shared/createimageeditrequest.md new file mode 100755 index 0000000..e280422 --- /dev/null +++ b/docs/models/shared/createimageeditrequest.md @@ -0,0 +1,14 @@ +# CreateImageEditRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `image` | [CreateImageEditRequestImage](../../models/shared/createimageeditrequestimage.md) | :heavy_check_mark: | The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. | | +| `mask` | [CreateImageEditRequestMask](../../models/shared/createimageeditrequestmask.md) | :heavy_minus_sign: | An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. 
| | +| `n` | *any* | :heavy_minus_sign: | N/A | | +| `prompt` | *string* | :heavy_check_mark: | A text description of the desired image(s). The maximum length is 1000 characters. | A cute baby sea otter wearing a beret | +| `responseFormat` | *any* | :heavy_minus_sign: | N/A | | +| `size` | *any* | :heavy_minus_sign: | N/A | | +| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/shared/createimageeditrequestimage.md b/docs/models/shared/createimageeditrequestimage.md new file mode 100755 index 0000000..8078b91 --- /dev/null +++ b/docs/models/shared/createimageeditrequestimage.md @@ -0,0 +1,9 @@ +# CreateImageEditRequestImage + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `content` | *Uint8Array* | :heavy_check_mark: | N/A | +| `image` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createimageeditrequestmask.md b/docs/models/shared/createimageeditrequestmask.md new file mode 100755 index 0000000..0d533a5 --- /dev/null +++ b/docs/models/shared/createimageeditrequestmask.md @@ -0,0 +1,9 @@ +# CreateImageEditRequestMask + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `content` | *Uint8Array* | :heavy_check_mark: | N/A | +| `mask` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createimagerequest.md b/docs/models/shared/createimagerequest.md new file mode 100755 index 0000000..8885741 --- /dev/null +++ b/docs/models/shared/createimagerequest.md @@ -0,0 +1,12 @@ +# CreateImageRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | +| `n` | *number* | :heavy_minus_sign: | The number of images to generate. Must be between 1 and 10. | 1 | +| `prompt` | *string* | :heavy_check_mark: | A text description of the desired image(s). The maximum length is 1000 characters. | A cute baby sea otter | +| `responseFormat` | [CreateImageRequestResponseFormat](../../models/shared/createimagerequestresponseformat.md) | :heavy_minus_sign: | The format in which the generated images are returned. Must be one of `url` or `b64_json`. | url | +| `size` | [CreateImageRequestSize](../../models/shared/createimagerequestsize.md) | :heavy_minus_sign: | The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. | 1024x1024 | +| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/shared/createimagerequestresponseformat.md b/docs/models/shared/createimagerequestresponseformat.md new file mode 100755 index 0000000..8900446 --- /dev/null +++ b/docs/models/shared/createimagerequestresponseformat.md @@ -0,0 +1,11 @@ +# CreateImageRequestResponseFormat + +The format in which the generated images are returned. Must be one of `url` or `b64_json`. + + +## Values + +| Name | Value | +| --------- | --------- | +| `Url` | url | +| `B64Json` | b64_json | \ No newline at end of file diff --git a/docs/models/shared/createimagerequestsize.md b/docs/models/shared/createimagerequestsize.md new file mode 100755 index 0000000..7c8d43d --- /dev/null +++ b/docs/models/shared/createimagerequestsize.md @@ -0,0 +1,12 @@ +# CreateImageRequestSize + +The size of the generated images. 
Must be one of `256x256`, `512x512`, or `1024x1024`. + + +## Values + +| Name | Value | +| ------------------------------- | ------------------------------- | +| `TwoHundredAndFiftySixx256` | 256x256 | +| `FiveHundredAndTwelvex512` | 512x512 | +| `OneThousandAndTwentyFourx1024` | 1024x1024 | \ No newline at end of file diff --git a/docs/models/shared/createimagevariationrequest.md b/docs/models/shared/createimagevariationrequest.md new file mode 100755 index 0000000..582a7ad --- /dev/null +++ b/docs/models/shared/createimagevariationrequest.md @@ -0,0 +1,12 @@ +# CreateImageVariationRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | +| `image` | [CreateImageVariationRequestImage](../../models/shared/createimagevariationrequestimage.md) | :heavy_check_mark: | The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. 
| +| `n` | *any* | :heavy_minus_sign: | N/A | +| `responseFormat` | *any* | :heavy_minus_sign: | N/A | +| `size` | *any* | :heavy_minus_sign: | N/A | +| `user` | *any* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createimagevariationrequestimage.md b/docs/models/shared/createimagevariationrequestimage.md new file mode 100755 index 0000000..98b6b8c --- /dev/null +++ b/docs/models/shared/createimagevariationrequestimage.md @@ -0,0 +1,9 @@ +# CreateImageVariationRequestImage + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `content` | *Uint8Array* | :heavy_check_mark: | N/A | +| `image` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createmoderationrequest.md b/docs/models/shared/createmoderationrequest.md new file mode 100755 index 0000000..46e2e02 --- /dev/null +++ b/docs/models/shared/createmoderationrequest.md @@ -0,0 +1,9 @@ +# CreateModerationRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input` | *any* | :heavy_check_mark: | The input text to classify | | +| `model` | *string* | :heavy_minus_sign: | Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.

The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
| text-moderation-stable | \ No newline at end of file diff --git a/docs/models/shared/createmoderationresponse.md b/docs/models/shared/createmoderationresponse.md new file mode 100755 index 0000000..76624f1 --- /dev/null +++ b/docs/models/shared/createmoderationresponse.md @@ -0,0 +1,12 @@ +# CreateModerationResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | +| `id` | *string* | :heavy_check_mark: | N/A | +| `model` | *string* | :heavy_check_mark: | N/A | +| `results` | [CreateModerationResponseResults](../../models/shared/createmoderationresponseresults.md)[] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createmoderationresponseresults.md b/docs/models/shared/createmoderationresponseresults.md new file mode 100755 index 0000000..111730c --- /dev/null +++ b/docs/models/shared/createmoderationresponseresults.md @@ -0,0 +1,10 @@ +# CreateModerationResponseResults + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | +| `categories` | [CreateModerationResponseResultsCategories](../../models/shared/createmoderationresponseresultscategories.md) | 
:heavy_check_mark: | N/A | +| `categoryScores` | [CreateModerationResponseResultsCategoryScores](../../models/shared/createmoderationresponseresultscategoryscores.md) | :heavy_check_mark: | N/A | +| `flagged` | *boolean* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createmoderationresponseresultscategories.md b/docs/models/shared/createmoderationresponseresultscategories.md new file mode 100755 index 0000000..b965612 --- /dev/null +++ b/docs/models/shared/createmoderationresponseresultscategories.md @@ -0,0 +1,14 @@ +# CreateModerationResponseResultsCategories + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `hate` | *boolean* | :heavy_check_mark: | N/A | +| `hateThreatening` | *boolean* | :heavy_check_mark: | N/A | +| `selfHarm` | *boolean* | :heavy_check_mark: | N/A | +| `sexual` | *boolean* | :heavy_check_mark: | N/A | +| `sexualMinors` | *boolean* | :heavy_check_mark: | N/A | +| `violence` | *boolean* | :heavy_check_mark: | N/A | +| `violenceGraphic` | *boolean* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createmoderationresponseresultscategoryscores.md b/docs/models/shared/createmoderationresponseresultscategoryscores.md new file mode 100755 index 0000000..f88d830 --- /dev/null +++ b/docs/models/shared/createmoderationresponseresultscategoryscores.md @@ -0,0 +1,14 @@ +# CreateModerationResponseResultsCategoryScores + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `hate` | *number* | :heavy_check_mark: | N/A | +| `hateThreatening` | *number* | :heavy_check_mark: | N/A | +| `selfHarm` | *number* | :heavy_check_mark: | N/A | +| `sexual` | *number* | :heavy_check_mark: | N/A | +| `sexualMinors` | *number* | :heavy_check_mark: | N/A | +| `violence` | *number* | :heavy_check_mark: 
| N/A | +| `violenceGraphic` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createsearchrequest.md b/docs/models/shared/createsearchrequest.md new file mode 100755 index 0000000..d07acc8 --- /dev/null +++ b/docs/models/shared/createsearchrequest.md @@ -0,0 +1,13 @@ +# CreateSearchRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `documents` | *string*[] | :heavy_minus_sign: | Up to 200 documents to search over, provided as a list of strings.

The maximum document length (in tokens) is 2034 minus the number of tokens in the query.

You should specify either `documents` or a `file`, but not both.
| | +| `file` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains documents to search over.

You should specify either `documents` or a `file`, but not both.
| | +| `maxRerank` | *number* | :heavy_minus_sign: | The maximum number of documents to be re-ranked and returned by search.

This flag only takes effect when `file` is set.
| | +| `query` | *string* | :heavy_check_mark: | Query to search against the documents. | the president | +| `returnMetadata` | *boolean* | :heavy_minus_sign: | A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a "metadata" field.

This flag only takes effect when `file` is set.
| | +| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/shared/createsearchresponse.md b/docs/models/shared/createsearchresponse.md new file mode 100755 index 0000000..3f9b362 --- /dev/null +++ b/docs/models/shared/createsearchresponse.md @@ -0,0 +1,12 @@ +# CreateSearchResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | +| `data` | [CreateSearchResponseData](../../models/shared/createsearchresponsedata.md)[] | :heavy_minus_sign: | N/A | +| `model` | *string* | :heavy_minus_sign: | N/A | +| `object` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createsearchresponsedata.md b/docs/models/shared/createsearchresponsedata.md new file mode 100755 index 0000000..4d79925 --- /dev/null +++ b/docs/models/shared/createsearchresponsedata.md @@ -0,0 +1,10 @@ +# CreateSearchResponseData + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `document` | *number* | :heavy_minus_sign: | N/A | +| `object` | *string* | :heavy_minus_sign: | N/A | +| `score` | *number* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequest.md b/docs/models/shared/createtranscriptionrequest.md new file mode 100755 index 0000000..1e72ffa --- /dev/null +++ b/docs/models/shared/createtranscriptionrequest.md @@ -0,0 +1,13 @@ +# CreateTranscriptionRequest + + +## Fields + +| Field | Type | Required | Description | +| 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [CreateTranscriptionRequestFile](../../models/shared/createtranscriptionrequestfile.md) | :heavy_check_mark: | The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
| +| `language` | *string* | :heavy_minus_sign: | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
| +| `model` | *string* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| +| `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
| +| `responseFormat` | *string* | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| +| `temperature` | *number* | :heavy_minus_sign: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
| \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequestfile.md b/docs/models/shared/createtranscriptionrequestfile.md new file mode 100755 index 0000000..76b878c --- /dev/null +++ b/docs/models/shared/createtranscriptionrequestfile.md @@ -0,0 +1,9 @@ +# CreateTranscriptionRequestFile + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `content` | *Uint8Array* | :heavy_check_mark: | N/A | +| `file` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionresponse.md b/docs/models/shared/createtranscriptionresponse.md new file mode 100755 index 0000000..87de4e9 --- /dev/null +++ b/docs/models/shared/createtranscriptionresponse.md @@ -0,0 +1,10 @@ +# CreateTranscriptionResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createtranslationrequest.md b/docs/models/shared/createtranslationrequest.md new file mode 100755 index 0000000..322e61e --- /dev/null +++ b/docs/models/shared/createtranslationrequest.md @@ -0,0 +1,12 @@ +# CreateTranslationRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [CreateTranslationRequestFile](../../models/shared/createtranslationrequestfile.md) | :heavy_check_mark: | The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
| +| `model` | *string* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| +| `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
| +| `responseFormat` | *string* | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| +| `temperature` | *number* | :heavy_minus_sign: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
| \ No newline at end of file diff --git a/docs/models/shared/createtranslationrequestfile.md b/docs/models/shared/createtranslationrequestfile.md new file mode 100755 index 0000000..f143930 --- /dev/null +++ b/docs/models/shared/createtranslationrequestfile.md @@ -0,0 +1,9 @@ +# CreateTranslationRequestFile + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `content` | *Uint8Array* | :heavy_check_mark: | N/A | +| `file` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createtranslationresponse.md b/docs/models/shared/createtranslationresponse.md new file mode 100755 index 0000000..13a17f7 --- /dev/null +++ b/docs/models/shared/createtranslationresponse.md @@ -0,0 +1,10 @@ +# CreateTranslationResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/deletefileresponse.md b/docs/models/shared/deletefileresponse.md new file mode 100755 index 0000000..4ea35c1 --- /dev/null +++ b/docs/models/shared/deletefileresponse.md @@ -0,0 +1,12 @@ +# DeleteFileResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `deleted` | *boolean* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/deletemodelresponse.md b/docs/models/shared/deletemodelresponse.md new file mode 100755 index 0000000..2344b1b --- /dev/null +++ b/docs/models/shared/deletemodelresponse.md @@ -0,0 +1,12 @@ +# DeleteModelResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| 
------------------ | ------------------ | ------------------ | ------------------ | +| `deleted` | *boolean* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/engine.md b/docs/models/shared/engine.md new file mode 100755 index 0000000..66a3ff8 --- /dev/null +++ b/docs/models/shared/engine.md @@ -0,0 +1,13 @@ +# Engine + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `created` | *number* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | +| `ready` | *boolean* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/finetune.md b/docs/models/shared/finetune.md new file mode 100755 index 0000000..ee00252 --- /dev/null +++ b/docs/models/shared/finetune.md @@ -0,0 +1,22 @@ +# FineTune + +OK + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | +| `createdAt` | *number* | :heavy_check_mark: | N/A | +| `events` | [FineTuneEvent](../../models/shared/finetuneevent.md)[] | :heavy_minus_sign: | N/A | +| `fineTunedModel` | *string* | :heavy_check_mark: | N/A | +| `hyperparams` | [FineTuneHyperparams](../../models/shared/finetunehyperparams.md) | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_check_mark: | N/A | +| `model` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | +| `organizationId` | *string* | :heavy_check_mark: | N/A | +| `resultFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | 
:heavy_check_mark: | N/A | +| `status` | *string* | :heavy_check_mark: | N/A | +| `trainingFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | N/A | +| `updatedAt` | *number* | :heavy_check_mark: | N/A | +| `validationFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/finetuneevent.md b/docs/models/shared/finetuneevent.md new file mode 100755 index 0000000..57ab0e6 --- /dev/null +++ b/docs/models/shared/finetuneevent.md @@ -0,0 +1,11 @@ +# FineTuneEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `createdAt` | *number* | :heavy_check_mark: | N/A | +| `level` | *string* | :heavy_check_mark: | N/A | +| `message` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/finetunehyperparams.md b/docs/models/shared/finetunehyperparams.md new file mode 100755 index 0000000..09a15c4 --- /dev/null +++ b/docs/models/shared/finetunehyperparams.md @@ -0,0 +1,7 @@ +# FineTuneHyperparams + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/shared/imagesresponse.md b/docs/models/shared/imagesresponse.md new file mode 100755 index 0000000..49610b7 --- /dev/null +++ b/docs/models/shared/imagesresponse.md @@ -0,0 +1,11 @@ +# ImagesResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | +| `created` | *number* | :heavy_check_mark: | N/A | +| `data` | 
[ImagesResponseData](../../models/shared/imagesresponsedata.md)[] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/imagesresponsedata.md b/docs/models/shared/imagesresponsedata.md new file mode 100755 index 0000000..70f28eb --- /dev/null +++ b/docs/models/shared/imagesresponsedata.md @@ -0,0 +1,9 @@ +# ImagesResponseData + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `b64Json` | *string* | :heavy_minus_sign: | N/A | +| `url` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/listenginesresponse.md b/docs/models/shared/listenginesresponse.md new file mode 100755 index 0000000..e89b7c4 --- /dev/null +++ b/docs/models/shared/listenginesresponse.md @@ -0,0 +1,11 @@ +# ListEnginesResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------- | ----------------------------------------- | ----------------------------------------- | ----------------------------------------- | +| `data` | [Engine](../../models/shared/engine.md)[] | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/listfilesresponse.md b/docs/models/shared/listfilesresponse.md new file mode 100755 index 0000000..33b46cb --- /dev/null +++ b/docs/models/shared/listfilesresponse.md @@ -0,0 +1,11 @@ +# ListFilesResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------- | +| `data` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/docs/models/shared/listfinetuneeventsresponse.md b/docs/models/shared/listfinetuneeventsresponse.md new file mode 100755 index 0000000..327a8b5 --- /dev/null +++ b/docs/models/shared/listfinetuneeventsresponse.md @@ -0,0 +1,11 @@ +# ListFineTuneEventsResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `data` | [FineTuneEvent](../../models/shared/finetuneevent.md)[] | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/listfinetunesresponse.md b/docs/models/shared/listfinetunesresponse.md new file mode 100755 index 0000000..509428c --- /dev/null +++ b/docs/models/shared/listfinetunesresponse.md @@ -0,0 +1,11 @@ +# ListFineTunesResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `data` | [FineTune](../../models/shared/finetune.md)[] | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/listmodelsresponse.md b/docs/models/shared/listmodelsresponse.md new file mode 100755 index 0000000..b73fcce --- /dev/null +++ b/docs/models/shared/listmodelsresponse.md @@ -0,0 +1,11 @@ +# ListModelsResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- | +| `data` | [Model](../../models/shared/model.md)[] | :heavy_check_mark: | N/A | +| `object` | 
*string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/model.md b/docs/models/shared/model.md new file mode 100755 index 0000000..2812a9b --- /dev/null +++ b/docs/models/shared/model.md @@ -0,0 +1,13 @@ +# Model + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `created` | *number* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | +| `ownedBy` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/openaifile.md b/docs/models/shared/openaifile.md new file mode 100755 index 0000000..affa24a --- /dev/null +++ b/docs/models/shared/openaifile.md @@ -0,0 +1,17 @@ +# OpenAIFile + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | +| `bytes` | *number* | :heavy_check_mark: | N/A | +| `createdAt` | *number* | :heavy_check_mark: | N/A | +| `filename` | *string* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | +| `purpose` | *string* | :heavy_check_mark: | N/A | +| `status` | *string* | :heavy_minus_sign: | N/A | +| `statusDetails` | [OpenAIFileStatusDetails](../../models/shared/openaifilestatusdetails.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/openaifilestatusdetails.md b/docs/models/shared/openaifilestatusdetails.md new file mode 100755 index 0000000..adf4a8d --- /dev/null +++ b/docs/models/shared/openaifilestatusdetails.md @@ -0,0 +1,7 @@ +# 
OpenAIFileStatusDetails + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/openai/README.md b/docs/openai/README.md deleted file mode 100755 index 3cb8865..0000000 --- a/docs/openai/README.md +++ /dev/null @@ -1,910 +0,0 @@ -# openAI - -## Overview - -The OpenAI REST API - -### Available Operations - -* [cancelFineTune](#cancelfinetune) - Immediately cancel a fine-tune job. - -* [~~createAnswer~~](#createanswer) - Answers the specified question using the provided documents and examples. - -The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). - :warning: **Deprecated** -* [createChatCompletion](#createchatcompletion) - Creates a completion for the chat message -* [~~createClassification~~](#createclassification) - Classifies the specified `query` using provided examples. - -The endpoint first [searches](/docs/api-reference/searches) over the labeled examples -to select the ones most relevant for the particular query. Then, the relevant examples -are combined with the query to construct a prompt to produce the final label via the -[completions](/docs/api-reference/completions) endpoint. - -Labeled examples can be provided via an uploaded `file`, or explicitly listed in the -request using the `examples` parameter for quick tests and small scale use cases. - :warning: **Deprecated** -* [createCompletion](#createcompletion) - Creates a completion for the provided prompt and parameters -* [createEdit](#createedit) - Creates a new edit for the provided input, instruction, and parameters. -* [createEmbedding](#createembedding) - Creates an embedding vector representing the input text. 
-* [createFile](#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. - -* [createFineTune](#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. - -Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - -[Learn more about Fine-tuning](/docs/guides/fine-tuning) - -* [createImage](#createimage) - Creates an image given a prompt. -* [createImageEdit](#createimageedit) - Creates an edited or extended image given an original image and a prompt. -* [createImageVariation](#createimagevariation) - Creates a variation of a given image. -* [createModeration](#createmoderation) - Classifies if text violates OpenAI's Content Policy -* [~~createSearch~~](#createsearch) - The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. - -To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. - -The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. - :warning: **Deprecated** -* [createTranscription](#createtranscription) - Transcribes audio into the input language. -* [createTranslation](#createtranslation) - Translates audio into into English. -* [deleteFile](#deletefile) - Delete a file. -* [deleteModel](#deletemodel) - Delete a fine-tuned model. 
You must have the Owner role in your organization. -* [downloadFile](#downloadfile) - Returns the contents of the specified file -* [~~listEngines~~](#listengines) - Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. :warning: **Deprecated** -* [listFiles](#listfiles) - Returns a list of files that belong to the user's organization. -* [listFineTuneEvents](#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. - -* [listFineTunes](#listfinetunes) - List your organization's fine-tuning jobs - -* [listModels](#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. -* [~~retrieveEngine~~](#retrieveengine) - Retrieves a model instance, providing basic information about it such as the owner and availability. :warning: **Deprecated** -* [retrieveFile](#retrievefile) - Returns information about a specific file. -* [retrieveFineTune](#retrievefinetune) - Gets info about the fine-tune job. - -[Learn more about Fine-tuning](/docs/guides/fine-tuning) - -* [retrieveModel](#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. - -## cancelFineTune - -Immediately cancel a fine-tune job. - - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.cancelFineTune({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", -}).then((res: CancelFineTuneResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## ~~createAnswer~~ - -Answers the specified question using the provided documents and examples. - -The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. 
The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). - - -> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateAnswerResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createAnswer({ - documents: [ - "provident", - "distinctio", - "quibusdam", - ], - examples: [ - [ - "corrupti", - "illum", - "vel", - "error", - ], - [ - "suscipit", - "iure", - "magnam", - ], - [ - "ipsa", - "delectus", - "tempora", - "suscipit", - ], - ], - examplesContext: "Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. border.", - expand: [ - "minus", - "placeat", - ], - file: "voluptatum", - logitBias: "iusto", - logprobs: 568045, - maxRerank: 392785, - maxTokens: 925597, - model: "temporibus", - n: 71036, - question: "What is the capital of Japan?", - returnMetadata: "quis", - returnPrompt: false, - searchModel: "veritatis", - stop: [ - "["\n"]", - ], - temperature: 3682.41, - user: "repellendus", -}).then((res: CreateAnswerResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createChatCompletion - -Creates a completion for the chat message - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateChatCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { ChatCompletionRequestMessageRole, ChatCompletionResponseMessageRole } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -const sdk = new Gpt(); - -sdk.openAI.createChatCompletion({ - frequencyPenalty: 9571.56, - logitBias: { - "odit": "at", - "at": "maiores", - "molestiae": "quod", - "quod": "esse", - }, - maxTokens: 520478, - messages: [ - { - content: 
"dolorum", - name: "Antoinette Nikolaus", - role: ChatCompletionRequestMessageRole.User, - }, - { - content: "hic", - name: "Everett Breitenberg", - role: ChatCompletionRequestMessageRole.System, - }, - { - content: "qui", - name: "Jonathon Klocko", - role: ChatCompletionRequestMessageRole.System, - }, - { - content: "perferendis", - name: "Faye Cormier", - role: ChatCompletionRequestMessageRole.User, - }, - ], - model: "laboriosam", - n: 1, - presencePenalty: 9437.49, - stop: [ - "in", - "corporis", - "iste", - ], - stream: false, - temperature: 1, - topP: 1, - user: "iure", -}).then((res: CreateChatCompletionResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## ~~createClassification~~ - -Classifies the specified `query` using provided examples. - -The endpoint first [searches](/docs/api-reference/searches) over the labeled examples -to select the ones most relevant for the particular query. Then, the relevant examples -are combined with the query to construct a prompt to produce the final label via the -[completions](/docs/api-reference/completions) endpoint. - -Labeled examples can be provided via an uploaded `file`, or explicitly listed in the -request using the `examples` parameter for quick tests and small scale use cases. - - -> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateClassificationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createClassification({ - examples: [ - [ - "architecto", - "ipsa", - "reiciendis", - ], - [ - "mollitia", - "laborum", - "dolores", - ], - [ - "corporis", - ], - [ - "nobis", - ], - ], - expand: "enim", - file: "omnis", - labels: [ - "minima", - "excepturi", - ], - logitBias: "accusantium", - logprobs: "iure", - maxExamples: 634274, - model: "doloribus", - query: "The plot is not very attractive.", - returnMetadata: "sapiente", - returnPrompt: "architecto", - searchModel: "mollitia", - temperature: 0, - user: "dolorem", -}).then((res: CreateClassificationResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createCompletion - -Creates a completion for the provided prompt and parameters - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createCompletion({ - bestOf: 635059, - echo: false, - frequencyPenalty: 1613.09, - logitBias: { - "mollitia": "occaecati", - "numquam": "commodi", - "quam": "molestiae", - "velit": "error", - }, - logprobs: 158969, - maxTokens: 16, - model: "quis", - n: 1, - presencePenalty: 1103.75, - prompt: [ - 317202, - 138183, - 778346, - ], - stop: " -", - stream: false, - suffix: "test.", - temperature: 1, - topP: 1, - user: "user-1234", -}).then((res: CreateCompletionResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createEdit - -Creates a new edit for the provided input, instruction, and parameters. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createEdit({ - input: "What day of the wek is it?", - instruction: "Fix the spelling mistakes.", - model: "tenetur", - n: 1, - temperature: 1, - topP: 1, -}).then((res: CreateEditResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createEmbedding - -Creates an embedding vector representing the input text. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateEmbeddingResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createEmbedding({ - input: [ - "This is a test.", - "This is a test.", - "This is a test.", - ], - model: "possimus", - user: "aut", -}).then((res: CreateEmbeddingResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createFile - -Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. - - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createFile({ - file: { - content: "quasi".encode(), - file: "error", - }, - purpose: "temporibus", -}).then((res: CreateFileResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createFineTune - -Creates a job that fine-tunes a specified model from a given dataset. - -Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
- -[Learn more about Fine-tuning](/docs/guides/fine-tuning) - - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createFineTune({ - batchSize: 673660, - classificationBetas: [ - 9719.45, - ], - classificationNClasses: 976460, - classificationPositiveClass: "vero", - computeClassificationMetrics: false, - learningRateMultiplier: 4686.51, - model: "praesentium", - nEpochs: 976762, - promptLossWeight: 557.14, - suffix: "omnis", - trainingFile: "file-ajSREls59WBbvgSzJSVWxMCB", - validationFile: "file-XjSREls59WBbvgSzJSVWxMCa", -}).then((res: CreateFineTuneResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createImage - -Creates an image given a prompt. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateImageResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateImageRequestResponseFormat, CreateImageRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -const sdk = new Gpt(); - -sdk.openAI.createImage({ - n: 1, - prompt: "A cute baby sea otter", - responseFormat: CreateImageRequestResponseFormat.Url, - size: CreateImageRequestSize.OneThousandAndTwentyFourx1024, - user: "voluptate", -}).then((res: CreateImageResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createImageEdit - -Creates an edited or extended image given an original image and a prompt. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateImageEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createImageEdit({ - image: { - content: "cum".encode(), - image: "perferendis", - }, - mask: { - content: "doloremque".encode(), - mask: "reprehenderit", - }, - n: "ut", - prompt: "A cute baby sea otter wearing a beret", - responseFormat: "maiores", - size: "dicta", - user: "corporis", -}).then((res: CreateImageEditResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createImageVariation - -Creates a variation of a given image. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateImageVariationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createImageVariation({ - image: { - content: "dolore".encode(), - image: "iusto", - }, - n: "dicta", - responseFormat: "harum", - size: "enim", - user: "accusamus", -}).then((res: CreateImageVariationResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createModeration - -Classifies if text violates OpenAI's Content Policy - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateModerationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createModeration({ - input: "I want to kill them.", - model: "text-moderation-stable", -}).then((res: CreateModerationResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## ~~createSearch~~ - -The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. 
- -To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. - -The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. - - -> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateSearchResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createSearch({ - createSearchRequest: { - documents: [ - "quae", - "ipsum", - "quidem", - "molestias", - ], - file: "excepturi", - maxRerank: 865103, - query: "the president", - returnMetadata: false, - user: "modi", - }, - engineId: "davinci", -}).then((res: CreateSearchResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createTranscription - -Transcribes audio into the input language. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateTranscriptionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createTranscription({ - file: { - content: "praesentium".encode(), - file: "rem", - }, - language: "voluptates", - model: "quasi", - prompt: "repudiandae", - responseFormat: "sint", - temperature: 831.12, -}).then((res: CreateTranscriptionResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## createTranslation - -Translates audio into into English. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateTranslationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createTranslation({ - file: { - content: "itaque".encode(), - file: "incidunt", - }, - model: "enim", - prompt: "consequatur", - responseFormat: "est", - temperature: 8423.42, -}).then((res: CreateTranslationResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## deleteFile - -Delete a file. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { DeleteFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.deleteFile({ - fileId: "explicabo", -}).then((res: DeleteFileResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## deleteModel - -Delete a fine-tuned model. You must have the Owner role in your organization. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { DeleteModelResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.deleteModel({ - model: "curie:ft-acmeco-2021-03-03-21-44-20", -}).then((res: DeleteModelResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## downloadFile - -Returns the contents of the specified file - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { DownloadFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.downloadFile({ - fileId: "deserunt", -}).then((res: DownloadFileResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## ~~listEngines~~ - -Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. 
- -> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { ListEnginesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.listEngines().then((res: ListEnginesResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## listFiles - -Returns a list of files that belong to the user's organization. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { ListFilesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.listFiles().then((res: ListFilesResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## listFineTuneEvents - -Get fine-grained status updates for a fine-tune job. - - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { ListFineTuneEventsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.listFineTuneEvents({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - stream: false, -}).then((res: ListFineTuneEventsResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## listFineTunes - -List your organization's fine-tuning jobs - - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { ListFineTunesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.listFineTunes().then((res: ListFineTunesResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## listModels - -Lists the currently available models, and provides basic information about each one such as the owner and availability. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { ListModelsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.listModels().then((res: ListModelsResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## ~~retrieveEngine~~ - -Retrieves a model instance, providing basic information about it such as the owner and availability. - -> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { RetrieveEngineResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.retrieveEngine({ - engineId: "davinci", -}).then((res: RetrieveEngineResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## retrieveFile - -Returns information about a specific file. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { RetrieveFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.retrieveFile({ - fileId: "distinctio", -}).then((res: RetrieveFileResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## retrieveFineTune - -Gets info about the fine-tune job. 
- -[Learn more about Fine-tuning](/docs/guides/fine-tuning) - - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { RetrieveFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.retrieveFineTune({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", -}).then((res: RetrieveFineTuneResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -## retrieveModel - -Retrieves a model instance, providing basic information about the model such as the owner and permissioning. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { RetrieveModelResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.retrieveModel({ - model: "text-davinci-001", -}).then((res: RetrieveModelResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` diff --git a/docs/gpt/README.md b/docs/sdks/gpt/README.md similarity index 100% rename from docs/gpt/README.md rename to docs/sdks/gpt/README.md diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md new file mode 100755 index 0000000..58244b9 --- /dev/null +++ b/docs/sdks/openai/README.md @@ -0,0 +1,1238 @@ +# openAI + +## Overview + +The OpenAI REST API + +### Available Operations + +* [cancelFineTune](#cancelfinetune) - Immediately cancel a fine-tune job. + +* [~~createAnswer~~](#createanswer) - Answers the specified question using the provided documents and examples. + +The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). 
+ :warning: **Deprecated** +* [createChatCompletion](#createchatcompletion) - Creates a completion for the chat message +* [~~createClassification~~](#createclassification) - Classifies the specified `query` using provided examples. + +The endpoint first [searches](/docs/api-reference/searches) over the labeled examples +to select the ones most relevant for the particular query. Then, the relevant examples +are combined with the query to construct a prompt to produce the final label via the +[completions](/docs/api-reference/completions) endpoint. + +Labeled examples can be provided via an uploaded `file`, or explicitly listed in the +request using the `examples` parameter for quick tests and small scale use cases. + :warning: **Deprecated** +* [createCompletion](#createcompletion) - Creates a completion for the provided prompt and parameters +* [createEdit](#createedit) - Creates a new edit for the provided input, instruction, and parameters. +* [createEmbedding](#createembedding) - Creates an embedding vector representing the input text. +* [createFile](#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. + +* [createFineTune](#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. + +Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + +[Learn more about Fine-tuning](/docs/guides/fine-tuning) + +* [createImage](#createimage) - Creates an image given a prompt. +* [createImageEdit](#createimageedit) - Creates an edited or extended image given an original image and a prompt. +* [createImageVariation](#createimagevariation) - Creates a variation of a given image. 
+* [createModeration](#createmoderation) - Classifies if text violates OpenAI's Content Policy
+* [~~createSearch~~](#createsearch) - The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them.
+
+To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores.
+
+The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
+ :warning: **Deprecated**
+* [createTranscription](#createtranscription) - Transcribes audio into the input language.
+* [createTranslation](#createtranslation) - Translates audio into English.
+* [deleteFile](#deletefile) - Delete a file.
+* [deleteModel](#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization.
+* [downloadFile](#downloadfile) - Returns the contents of the specified file
+* [~~listEngines~~](#listengines) - Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. :warning: **Deprecated**
+* [listFiles](#listfiles) - Returns a list of files that belong to the user's organization.
+* [listFineTuneEvents](#listfinetuneevents) - Get fine-grained status updates for a fine-tune job.
+
+* [listFineTunes](#listfinetunes) - List your organization's fine-tuning jobs
+
+* [listModels](#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability.
+* [~~retrieveEngine~~](#retrieveengine) - Retrieves a model instance, providing basic information about it such as the owner and availability. :warning: **Deprecated** +* [retrieveFile](#retrievefile) - Returns information about a specific file. +* [retrieveFineTune](#retrievefinetune) - Gets info about the fine-tune job. + +[Learn more about Fine-tuning](/docs/guides/fine-tuning) + +* [retrieveModel](#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + +## cancelFineTune + +Immediately cancel a fine-tune job. + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.cancelFineTune({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", +}).then((res: CancelFineTuneResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `request` | [operations.CancelFineTuneRequest](../../models/operations/cancelfinetunerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CancelFineTuneResponse](../../models/operations/cancelfinetuneresponse.md)>** + + +## ~~createAnswer~~ + +Answers the specified question using the provided documents and examples. 
+
+The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
+
+
+> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible.
+
+### Example Usage
+
+```typescript
+import { Gpt } from "@speakeasy-api/openai";
+import { CreateAnswerResponse } from "@speakeasy-api/openai/dist/sdk/models/operations";
+
+const sdk = new Gpt();
+
+sdk.openAI.createAnswer({
+  documents: [
+    "provident",
+    "distinctio",
+    "quibusdam",
+  ],
+  examples: [
+    [
+      "corrupti",
+      "illum",
+      "vel",
+      "error",
+    ],
+    [
+      "suscipit",
+      "iure",
+      "magnam",
+    ],
+    [
+      "ipsa",
+      "delectus",
+      "tempora",
+      "suscipit",
+    ],
+  ],
+  examplesContext: "Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. border.",
+  expand: [
+    "minus",
+    "placeat",
+  ],
+  file: "voluptatum",
+  logitBias: "iusto",
+  logprobs: 568045,
+  maxRerank: 392785,
+  maxTokens: 925597,
+  model: "temporibus",
+  n: 71036,
+  question: "What is the capital of Japan?",
+  returnMetadata: "quis",
+  returnPrompt: false,
+  searchModel: "veritatis",
+  stop: [
+    "[\"\\n\"]",
+  ],
+  temperature: 3682.41,
+  user: "repellendus",
+}).then((res: CreateAnswerResponse) => {
+  if (res.statusCode == 200) {
+    // handle response
+  }
+});
+```
+
+### Parameters
+
+| Parameter                                                                | Type                                                                     | Required                                                                 | Description                                                              |
+| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ |
+| `request` | [shared.CreateAnswerRequest](../../models/shared/createanswerrequest.md) | :heavy_check_mark: | The request object to 
use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateAnswerResponse](../../models/operations/createanswerresponse.md)>** + + +## createChatCompletion + +Creates a completion for the chat message + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateChatCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { ChatCompletionRequestMessageRole, ChatCompletionResponseMessageRole } from "@speakeasy-api/openai/dist/sdk/models/shared"; + +const sdk = new Gpt(); + +sdk.openAI.createChatCompletion({ + frequencyPenalty: 9571.56, + logitBias: {}, + maxTokens: 778157, + messages: [ + { + content: "at", + name: "Emilio Krajcik", + role: ChatCompletionRequestMessageRole.User, + }, + ], + model: "totam", + n: 1, + presencePenalty: 7805.29, + stop: [ + "nam", + ], + stream: false, + temperature: 1, + topP: 1, + user: "officia", +}).then((res: CreateChatCompletionResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `request` | [shared.CreateChatCompletionRequest](../../models/shared/createchatcompletionrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.CreateChatCompletionResponse](../../models/operations/createchatcompletionresponse.md)>** + + +## ~~createClassification~~ + +Classifies the specified `query` using provided examples. + +The endpoint first [searches](/docs/api-reference/searches) over the labeled examples +to select the ones most relevant for the particular query. Then, the relevant examples +are combined with the query to construct a prompt to produce the final label via the +[completions](/docs/api-reference/completions) endpoint. + +Labeled examples can be provided via an uploaded `file`, or explicitly listed in the +request using the `examples` parameter for quick tests and small scale use cases. + + +> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateClassificationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createClassification({ + examples: [ + [ + "deleniti", + ], + [ + "optio", + "totam", + "beatae", + "commodi", + ], + [ + "modi", + "qui", + ], + ], + expand: "impedit", + file: "cum", + labels: [ + "ipsum", + "excepturi", + ], + logitBias: "aspernatur", + logprobs: "perferendis", + maxExamples: 324141, + model: "natus", + query: "The plot is not very attractive.", + returnMetadata: "sed", + returnPrompt: "iste", + searchModel: "dolor", + temperature: 0, + user: "natus", +}).then((res: CreateClassificationResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `request` | [shared.CreateClassificationRequest](../../models/shared/createclassificationrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateClassificationResponse](../../models/operations/createclassificationresponse.md)>** + + +## createCompletion + +Creates a completion for the provided prompt and parameters + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createCompletion({ + bestOf: 386489, + echo: false, + frequencyPenalty: 9437.49, + logitBias: {}, + logprobs: 902599, + maxTokens: 16, + model: "fuga", + n: 1, + presencePenalty: 4499.5, + prompt: [ + "This is a test.", + "This is a test.", + "This is a test.", + ], + stop: "\n", + stream: false, + suffix: "test.", + temperature: 1, + topP: 1, + user: "user-1234", +}).then((res: CreateCompletionResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `request` | [shared.CreateCompletionRequest](../../models/shared/createcompletionrequest.md) | :heavy_check_mark: | The request object to use for the request. 
| +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateCompletionResponse](../../models/operations/createcompletionresponse.md)>** + + +## createEdit + +Creates a new edit for the provided input, instruction, and parameters. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createEdit({ + input: "What day of the wek is it?", + instruction: "Fix the spelling mistakes.", + model: "saepe", + n: 1, + temperature: 1, + topP: 1, +}).then((res: CreateEditResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `request` | [shared.CreateEditRequest](../../models/shared/createeditrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateEditResponse](../../models/operations/createeditresponse.md)>** + + +## createEmbedding + +Creates an embedding vector representing the input text. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateEmbeddingResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createEmbedding({ + input: [ + 60225, + ], + model: "reiciendis", + user: "est", +}).then((res: CreateEmbeddingResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `request` | [shared.CreateEmbeddingRequest](../../models/shared/createembeddingrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateEmbeddingResponse](../../models/operations/createembeddingresponse.md)>** + + +## createFile + +Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. 
+ + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createFile({ + file: { + content: "mollitia".encode(), + file: "laborum", + }, + purpose: "dolores", +}).then((res: CreateFileResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `request` | [shared.CreateFileRequest](../../models/shared/createfilerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateFileResponse](../../models/operations/createfileresponse.md)>** + + +## createFineTune + +Creates a job that fine-tunes a specified model from a given dataset. + +Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
+ +[Learn more about Fine-tuning](/docs/guides/fine-tuning) + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createFineTune({ + batchSize: 210382, + classificationBetas: [ + 1289.26, + 7506.86, + ], + classificationNClasses: 315428, + classificationPositiveClass: "omnis", + computeClassificationMetrics: false, + learningRateMultiplier: 3637.11, + model: "minima", + nEpochs: 570197, + promptLossWeight: 384.25, + suffix: "iure", + trainingFile: "file-ajSREls59WBbvgSzJSVWxMCB", + validationFile: "file-XjSREls59WBbvgSzJSVWxMCa", +}).then((res: CreateFineTuneResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `request` | [shared.CreateFineTuneRequest](../../models/shared/createfinetunerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateFineTuneResponse](../../models/operations/createfinetuneresponse.md)>** + + +## createImage + +Creates an image given a prompt. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateImageResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { CreateImageRequestResponseFormat, CreateImageRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; + +const sdk = new Gpt(); + +sdk.openAI.createImage({ + n: 1, + prompt: "A cute baby sea otter", + responseFormat: CreateImageRequestResponseFormat.Url, + size: CreateImageRequestSize.OneThousandAndTwentyFourx1024, + user: "culpa", +}).then((res: CreateImageResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `request` | [shared.CreateImageRequest](../../models/shared/createimagerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateImageResponse](../../models/operations/createimageresponse.md)>** + + +## createImageEdit + +Creates an edited or extended image given an original image and a prompt. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateImageEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createImageEdit({ + image: { + content: "doloribus".encode(), + image: "sapiente", + }, + mask: { + content: "architecto".encode(), + mask: "mollitia", + }, + n: "dolorem", + prompt: "A cute baby sea otter wearing a beret", + responseFormat: "culpa", + size: "consequuntur", + user: "repellat", +}).then((res: CreateImageEditResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `request` | [shared.CreateImageEditRequest](../../models/shared/createimageeditrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateImageEditResponse](../../models/operations/createimageeditresponse.md)>** + + +## createImageVariation + +Creates a variation of a given image. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateImageVariationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createImageVariation({ + image: { + content: "mollitia".encode(), + image: "occaecati", + }, + n: "numquam", + responseFormat: "commodi", + size: "quam", + user: "molestiae", +}).then((res: CreateImageVariationResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `request` | [shared.CreateImageVariationRequest](../../models/shared/createimagevariationrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.CreateImageVariationResponse](../../models/operations/createimagevariationresponse.md)>** + + +## createModeration + +Classifies if text violates OpenAI's Content Policy + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateModerationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createModeration({ + input: "I want to kill them.", + model: "text-moderation-stable", +}).then((res: CreateModerationResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `request` | [shared.CreateModerationRequest](../../models/shared/createmoderationrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateModerationResponse](../../models/operations/createmoderationresponse.md)>** + + +## ~~createSearch~~ + +The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. + +To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. 
These documents will be returned along with their search scores. + +The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. + + +> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateSearchResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createSearch({ + createSearchRequest: { + documents: [ + "quia", + "quis", + "vitae", + ], + file: "laborum", + maxRerank: 656330, + query: "the president", + returnMetadata: false, + user: "enim", + }, + engineId: "davinci", +}).then((res: CreateSearchResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `request` | [operations.CreateSearchRequest](../../models/operations/createsearchrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateSearchResponse](../../models/operations/createsearchresponse.md)>** + + +## createTranscription + +Transcribes audio into the input language. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateTranscriptionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createTranscription({ + file: { + content: "odit".encode(), + file: "quo", + }, + language: "sequi", + model: "tenetur", + prompt: "ipsam", + responseFormat: "id", + temperature: 8209.94, +}).then((res: CreateTranscriptionResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `request` | [shared.CreateTranscriptionRequest](../../models/shared/createtranscriptionrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateTranscriptionResponse](../../models/operations/createtranscriptionresponse.md)>** + + +## createTranslation + +Translates audio into English. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateTranslationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.createTranslation({ + file: { + content: "aut".encode(), + file: "quasi", + }, + model: "error", + prompt: "temporibus", + responseFormat: "laborum", + temperature: 960.98, +}).then((res: CreateTranslationResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `request` | [shared.CreateTranslationRequest](../../models/shared/createtranslationrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateTranslationResponse](../../models/operations/createtranslationresponse.md)>** + + +## deleteFile + +Delete a file. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { DeleteFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.deleteFile({ + fileId: "reiciendis", +}).then((res: DeleteFileResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `request` | [operations.DeleteFileRequest](../../models/operations/deletefilerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.DeleteFileResponse](../../models/operations/deletefileresponse.md)>** + + +## deleteModel + +Delete a fine-tuned model. You must have the Owner role in your organization. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { DeleteModelResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.deleteModel({ + model: "curie:ft-acmeco-2021-03-03-21-44-20", +}).then((res: DeleteModelResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `request` | [operations.DeleteModelRequest](../../models/operations/deletemodelrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.DeleteModelResponse](../../models/operations/deletemodelresponse.md)>** + + +## downloadFile + +Returns the contents of the specified file + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { DownloadFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.downloadFile({ + fileId: "voluptatibus", +}).then((res: DownloadFileResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `request` | [operations.DownloadFileRequest](../../models/operations/downloadfilerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.DownloadFileResponse](../../models/operations/downloadfileresponse.md)>** + + +## ~~listEngines~~ + +Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. + +> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListEnginesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listEngines().then((res: ListEnginesResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.ListEnginesResponse](../../models/operations/listenginesresponse.md)>** + + +## listFiles + +Returns a list of files that belong to the user's organization. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListFilesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listFiles().then((res: ListFilesResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.ListFilesResponse](../../models/operations/listfilesresponse.md)>** + + +## listFineTuneEvents + +Get fine-grained status updates for a fine-tune job. 
+ + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListFineTuneEventsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listFineTuneEvents({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + stream: false, +}).then((res: ListFineTuneEventsResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | +| `request` | [operations.ListFineTuneEventsRequest](../../models/operations/listfinetuneeventsrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.ListFineTuneEventsResponse](../../models/operations/listfinetuneeventsresponse.md)>** + + +## listFineTunes + +List your organization's fine-tuning jobs + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListFineTunesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listFineTunes().then((res: ListFineTunesResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.ListFineTunesResponse](../../models/operations/listfinetunesresponse.md)>** + + +## listModels + +Lists the currently available models, and provides basic information about each one such as the owner and availability. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListModelsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listModels().then((res: ListModelsResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.ListModelsResponse](../../models/operations/listmodelsresponse.md)>** + + +## ~~retrieveEngine~~ + +Retrieves a model instance, providing basic information about it such as the owner and availability. + +> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { RetrieveEngineResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.retrieveEngine({ + engineId: "davinci", +}).then((res: RetrieveEngineResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `request` | [operations.RetrieveEngineRequest](../../models/operations/retrieveenginerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.RetrieveEngineResponse](../../models/operations/retrieveengineresponse.md)>** + + +## retrieveFile + +Returns information about a specific file. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { RetrieveFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.retrieveFile({ + fileId: "vero", +}).then((res: RetrieveFileResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `request` | [operations.RetrieveFileRequest](../../models/operations/retrievefilerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.RetrieveFileResponse](../../models/operations/retrievefileresponse.md)>** + + +## retrieveFineTune + +Gets info about the fine-tune job. 
+ +[Learn more about Fine-tuning](/docs/guides/fine-tuning) + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { RetrieveFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.retrieveFineTune({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", +}).then((res: RetrieveFineTuneResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `request` | [operations.RetrieveFineTuneRequest](../../models/operations/retrievefinetunerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.RetrieveFineTuneResponse](../../models/operations/retrievefinetuneresponse.md)>** + + +## retrieveModel + +Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { RetrieveModelResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.retrieveModel({ + model: "text-davinci-001", +}).then((res: RetrieveModelResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `request` | [operations.RetrieveModelRequest](../../models/operations/retrievemodelrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.RetrieveModelResponse](../../models/operations/retrievemodelresponse.md)>** + diff --git a/files.gen b/files.gen index dba9da9..f063f51 100755 --- a/files.gen +++ b/files.gen @@ -89,6 +89,117 @@ src/sdk/models/shared/listfinetunesresponse.ts src/sdk/models/shared/listmodelsresponse.ts src/sdk/models/shared/model.ts src/sdk/models/shared/index.ts -docs/gpt/README.md -docs/openai/README.md -USAGE.md \ No newline at end of file +docs/sdks/gpt/README.md +docs/sdks/openai/README.md +USAGE.md +docs/models/operations/cancelfinetunerequest.md +docs/models/operations/cancelfinetuneresponse.md +docs/models/operations/createanswerresponse.md +docs/models/operations/createchatcompletionresponse.md +docs/models/operations/createclassificationresponse.md +docs/models/operations/createcompletionresponse.md +docs/models/operations/createeditresponse.md +docs/models/operations/createembeddingresponse.md +docs/models/operations/createfileresponse.md +docs/models/operations/createfinetuneresponse.md +docs/models/operations/createimageresponse.md +docs/models/operations/createimageeditresponse.md +docs/models/operations/createimagevariationresponse.md +docs/models/operations/createmoderationresponse.md +docs/models/operations/createsearchrequest.md +docs/models/operations/createsearchresponse.md +docs/models/operations/createtranscriptionresponse.md +docs/models/operations/createtranslationresponse.md +docs/models/operations/deletefilerequest.md +docs/models/operations/deletefileresponse.md +docs/models/operations/deletemodelrequest.md +docs/models/operations/deletemodelresponse.md +docs/models/operations/downloadfilerequest.md +docs/models/operations/downloadfileresponse.md +docs/models/operations/listenginesresponse.md +docs/models/operations/listfilesresponse.md +docs/models/operations/listfinetuneeventsrequest.md +docs/models/operations/listfinetuneeventsresponse.md +docs/models/operations/listfinetunesresponse.md 
+docs/models/operations/listmodelsresponse.md +docs/models/operations/retrieveenginerequest.md +docs/models/operations/retrieveengineresponse.md +docs/models/operations/retrievefilerequest.md +docs/models/operations/retrievefileresponse.md +docs/models/operations/retrievefinetunerequest.md +docs/models/operations/retrievefinetuneresponse.md +docs/models/operations/retrievemodelrequest.md +docs/models/operations/retrievemodelresponse.md +docs/models/shared/finetunehyperparams.md +docs/models/shared/finetune.md +docs/models/shared/openaifilestatusdetails.md +docs/models/shared/openaifile.md +docs/models/shared/finetuneevent.md +docs/models/shared/createanswerresponseselecteddocuments.md +docs/models/shared/createanswerresponse.md +docs/models/shared/createanswerrequest.md +docs/models/shared/createchatcompletionresponsechoices.md +docs/models/shared/createchatcompletionresponseusage.md +docs/models/shared/createchatcompletionresponse.md +docs/models/shared/chatcompletionresponsemessagerole.md +docs/models/shared/chatcompletionresponsemessage.md +docs/models/shared/createchatcompletionrequestlogitbias.md +docs/models/shared/createchatcompletionrequest.md +docs/models/shared/chatcompletionrequestmessagerole.md +docs/models/shared/chatcompletionrequestmessage.md +docs/models/shared/createclassificationresponseselectedexamples.md +docs/models/shared/createclassificationresponse.md +docs/models/shared/createclassificationrequest.md +docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md +docs/models/shared/createcompletionresponsechoiceslogprobs.md +docs/models/shared/createcompletionresponsechoices.md +docs/models/shared/createcompletionresponseusage.md +docs/models/shared/createcompletionresponse.md +docs/models/shared/createcompletionrequestlogitbias.md +docs/models/shared/createcompletionrequest.md +docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md +docs/models/shared/createeditresponsechoiceslogprobs.md 
+docs/models/shared/createeditresponsechoices.md +docs/models/shared/createeditresponseusage.md +docs/models/shared/createeditresponse.md +docs/models/shared/createeditrequest.md +docs/models/shared/createembeddingresponsedata.md +docs/models/shared/createembeddingresponseusage.md +docs/models/shared/createembeddingresponse.md +docs/models/shared/createembeddingrequest.md +docs/models/shared/createfilerequestfile.md +docs/models/shared/createfilerequest.md +docs/models/shared/createfinetunerequest.md +docs/models/shared/imagesresponsedata.md +docs/models/shared/imagesresponse.md +docs/models/shared/createimagerequestresponseformat.md +docs/models/shared/createimagerequestsize.md +docs/models/shared/createimagerequest.md +docs/models/shared/createimageeditrequestimage.md +docs/models/shared/createimageeditrequestmask.md +docs/models/shared/createimageeditrequest.md +docs/models/shared/createimagevariationrequestimage.md +docs/models/shared/createimagevariationrequest.md +docs/models/shared/createmoderationresponseresultscategories.md +docs/models/shared/createmoderationresponseresultscategoryscores.md +docs/models/shared/createmoderationresponseresults.md +docs/models/shared/createmoderationresponse.md +docs/models/shared/createmoderationrequest.md +docs/models/shared/createsearchresponsedata.md +docs/models/shared/createsearchresponse.md +docs/models/shared/createsearchrequest.md +docs/models/shared/createtranscriptionresponse.md +docs/models/shared/createtranscriptionrequestfile.md +docs/models/shared/createtranscriptionrequest.md +docs/models/shared/createtranslationresponse.md +docs/models/shared/createtranslationrequestfile.md +docs/models/shared/createtranslationrequest.md +docs/models/shared/deletefileresponse.md +docs/models/shared/deletemodelresponse.md +docs/models/shared/listenginesresponse.md +docs/models/shared/engine.md +docs/models/shared/listfilesresponse.md +docs/models/shared/listfinetuneeventsresponse.md 
+docs/models/shared/listfinetunesresponse.md +docs/models/shared/listmodelsresponse.md +docs/models/shared/model.md \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index d843212..cd662b3 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 5399f7767be93d4a4b8cecb9bbc687b3 docVersion: 1.2.0 - speakeasyVersion: 1.45.2 - generationVersion: 2.37.2 + speakeasyVersion: 1.47.0 + generationVersion: 2.39.0 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 1.10.0 + version: 1.11.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 6ddccf4..09bfe4b 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "1.10.0", + "version": "1.11.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "1.10.0", + "version": "1.11.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index bf4cf8b..66ca90f 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "1.10.0", + "version": "1.11.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/internal/utils/utils.ts b/src/internal/utils/utils.ts index 4388e25..dc6b289 100755 --- a/src/internal/utils/utils.ts +++ b/src/internal/utils/utils.ts @@ -176,10 +176,12 @@ export function templateUrl( params: Record ): string { let res: string = stringWithParams; - Object.entries(params).forEach(([key, value]) => { - const match: string = "{" + key + "}"; - res = res.replaceAll(match, value); - }); + if(params) { + Object.entries(params).forEach(([key, value]) => { + const match: string = "{" + key + "}"; + res = res.replaceAll(match, value); + }); + } return res; } diff --git 
a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts index 2bd3ee1..edb1ad0 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -6,6 +6,16 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { ChatCompletionRequestMessage } from "./chatcompletionrequestmessage"; import { Expose, Type } from "class-transformer"; +/** + * Modify the likelihood of specified tokens appearing in the completion. + * + * @remarks + * + * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + * + */ +export class CreateChatCompletionRequestLogitBias extends SpeakeasyBase {} + export class CreateChatCompletionRequest extends SpeakeasyBase { /** * completions_frequency_penalty_description @@ -24,7 +34,8 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "logit_bias" }) - logitBias?: Record; + @Type(() => CreateChatCompletionRequestLogitBias) + logitBias?: CreateChatCompletionRequestLogitBias; /** * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). 
diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts index f13fd8a..e313914 100755 --- a/src/sdk/models/shared/createcompletionrequest.ts +++ b/src/sdk/models/shared/createcompletionrequest.ts @@ -3,7 +3,19 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; +import { Expose, Type } from "class-transformer"; + +/** + * Modify the likelihood of specified tokens appearing in the completion. + * + * @remarks + * + * Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + * + * As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + * + */ +export class CreateCompletionRequestLogitBias extends SpeakeasyBase {} export class CreateCompletionRequest extends SpeakeasyBase { /** @@ -54,7 +66,8 @@ export class CreateCompletionRequest extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "logit_bias" }) - logitBias?: Record; + @Type(() => CreateCompletionRequestLogitBias) + logitBias?: CreateCompletionRequestLogitBias; /** * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. 
diff --git a/src/sdk/models/shared/createcompletionresponse.ts b/src/sdk/models/shared/createcompletionresponse.ts index cb99f53..bb6f68c 100755 --- a/src/sdk/models/shared/createcompletionresponse.ts +++ b/src/sdk/models/shared/createcompletionresponse.ts @@ -5,6 +5,8 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; +export class CreateCompletionResponseChoicesLogprobsTopLogprobs extends SpeakeasyBase {} + export class CreateCompletionResponseChoicesLogprobs extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "text_offset" }) @@ -18,9 +20,10 @@ export class CreateCompletionResponseChoicesLogprobs extends SpeakeasyBase { @Expose({ name: "tokens" }) tokens?: string[]; - @SpeakeasyMetadata() + @SpeakeasyMetadata({ elemType: CreateCompletionResponseChoicesLogprobsTopLogprobs }) @Expose({ name: "top_logprobs" }) - topLogprobs?: Record[]; + @Type(() => CreateCompletionResponseChoicesLogprobsTopLogprobs) + topLogprobs?: CreateCompletionResponseChoicesLogprobsTopLogprobs[]; } export class CreateCompletionResponseChoices extends SpeakeasyBase { diff --git a/src/sdk/models/shared/createeditresponse.ts b/src/sdk/models/shared/createeditresponse.ts index b1820bb..bcd39db 100755 --- a/src/sdk/models/shared/createeditresponse.ts +++ b/src/sdk/models/shared/createeditresponse.ts @@ -5,6 +5,8 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; +export class CreateEditResponseChoicesLogprobsTopLogprobs extends SpeakeasyBase {} + export class CreateEditResponseChoicesLogprobs extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "text_offset" }) @@ -18,9 +20,10 @@ export class CreateEditResponseChoicesLogprobs extends SpeakeasyBase { @Expose({ name: "tokens" }) tokens?: string[]; - @SpeakeasyMetadata() + @SpeakeasyMetadata({ elemType: CreateEditResponseChoicesLogprobsTopLogprobs }) @Expose({ name: "top_logprobs" 
}) - topLogprobs?: Record[]; + @Type(() => CreateEditResponseChoicesLogprobsTopLogprobs) + topLogprobs?: CreateEditResponseChoicesLogprobsTopLogprobs[]; } export class CreateEditResponseChoices extends SpeakeasyBase { diff --git a/src/sdk/models/shared/finetune.ts b/src/sdk/models/shared/finetune.ts index bfeb9a5..6ab53ab 100755 --- a/src/sdk/models/shared/finetune.ts +++ b/src/sdk/models/shared/finetune.ts @@ -7,6 +7,8 @@ import { FineTuneEvent } from "./finetuneevent"; import { OpenAIFile } from "./openaifile"; import { Expose, Type } from "class-transformer"; +export class FineTuneHyperparams extends SpeakeasyBase {} + /** * OK */ @@ -26,7 +28,8 @@ export class FineTune extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "hyperparams" }) - hyperparams: Record; + @Type(() => FineTuneHyperparams) + hyperparams: FineTuneHyperparams; @SpeakeasyMetadata() @Expose({ name: "id" }) diff --git a/src/sdk/models/shared/openaifile.ts b/src/sdk/models/shared/openaifile.ts index 08b1a8c..3c6dea6 100755 --- a/src/sdk/models/shared/openaifile.ts +++ b/src/sdk/models/shared/openaifile.ts @@ -3,7 +3,9 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; +import { Expose, Type } from "class-transformer"; + +export class OpenAIFileStatusDetails extends SpeakeasyBase {} /** * OK @@ -39,5 +41,6 @@ export class OpenAIFile extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "status_details" }) - statusDetails?: Record; + @Type(() => OpenAIFileStatusDetails) + statusDetails?: OpenAIFileStatusDetails; } diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 9f9a5e6..ed6ecd0 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "1.2.0"; - sdkVersion = "1.10.0"; - genVersion = "2.37.2"; + sdkVersion = "1.11.0"; + genVersion = "2.39.0"; public constructor(init?: Partial) { Object.assign(this, 
init); From 98ed2e83e619bb535dc63f54e81951d97c132312 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sun, 11 Jun 2023 01:21:24 +0000 Subject: [PATCH 03/66] ci: regenerated with OpenAPI Doc 1.2.0, Speakeay CLI 1.47.1 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 8c22a12..406a386 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -164,4 +164,12 @@ Based on: - OpenAPI Doc 1.2.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.47.0 (2.39.0) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v1.11.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.11.0 - . \ No newline at end of file +- [NPM v1.11.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.11.0 - . + +## 2023-06-11 01:21:03 +### Changes +Based on: +- OpenAPI Doc 1.2.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.47.1 (2.39.2) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v1.11.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.11.1 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index cd662b3..206ecce 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 5399f7767be93d4a4b8cecb9bbc687b3 docVersion: 1.2.0 - speakeasyVersion: 1.47.0 - generationVersion: 2.39.0 + speakeasyVersion: 1.47.1 + generationVersion: 2.39.2 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 1.11.0 + version: 1.11.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 09bfe4b..3a2f8a7 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "1.11.0", + "version": "1.11.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "1.11.0", + "version": "1.11.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 66ca90f..958ec3c 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "1.11.0", + "version": "1.11.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index ed6ecd0..5e377a4 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "1.2.0"; - sdkVersion = "1.11.0"; - genVersion = "2.39.0"; + sdkVersion = "1.11.1"; + genVersion = "2.39.2"; public constructor(init?: Partial) { Object.assign(this, init); From e8b94327e38499980a7f6544a0d93747ba3b64d0 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Mon, 12 Jun 2023 01:16:17 +0000 Subject: [PATCH 04/66] ci: regenerated with OpenAPI Doc 1.2.0, Speakeay CLI 1.47.1 --- RELEASES.md | 10 +++++++++- gen.yaml | 4 ++-- package-lock.json | 4 ++-- package.json | 2 +- 
src/sdk/sdk.ts | 2 +- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 406a386..72579c7 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -172,4 +172,12 @@ Based on: - OpenAPI Doc 1.2.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.47.1 (2.39.2) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v1.11.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.11.1 - . \ No newline at end of file +- [NPM v1.11.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.11.1 - . + +## 2023-06-12 01:15:56 +### Changes +Based on: +- OpenAPI Doc 1.2.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.47.1 (2.39.2) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v1.11.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.11.2 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 206ecce..aba3849 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,6 +1,6 @@ configVersion: 1.0.0 management: - docChecksum: 5399f7767be93d4a4b8cecb9bbc687b3 + docChecksum: deeec14f237b7f3737373e14747b7a04 docVersion: 1.2.0 speakeasyVersion: 1.47.1 generationVersion: 2.39.2 @@ -10,7 +10,7 @@ generation: singleTagPerOp: false telemetryEnabled: false typescript: - version: 1.11.1 + version: 1.11.2 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 3a2f8a7..d63fde8 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "1.11.1", + "version": "1.11.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "1.11.1", + "version": "1.11.2", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 958ec3c..098a51d 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 
@@ { "name": "@speakeasy-api/openai", - "version": "1.11.1", + "version": "1.11.2", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 5e377a4..cc9cc46 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,7 +38,7 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "1.2.0"; - sdkVersion = "1.11.1"; + sdkVersion = "1.11.2"; genVersion = "2.39.2"; public constructor(init?: Partial) { From 486a09b71b3ec6f428f38aad0b05bbfdb29c3168 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 14 Jun 2023 01:10:39 +0000 Subject: [PATCH 05/66] ci: regenerated with OpenAPI Doc 1.3.0, Speakeay CLI 1.47.3 --- README.md | 4 +- RELEASES.md | 10 +- docs/models/shared/chatcompletionfunctions.md | 10 + .../shared/chatcompletionrequestmessage.md | 11 +- ...hatcompletionrequestmessagefunctioncall.md | 11 + .../chatcompletionrequestmessagerole.md | 5 +- .../shared/chatcompletionresponsemessage.md | 9 +- ...atcompletionresponsemessagefunctioncall.md | 11 + .../chatcompletionresponsemessagerole.md | 3 +- docs/models/shared/createanswerrequest.md | 40 +-- .../shared/createchatcompletionrequest.md | 10 +- ...reatechatcompletionrequestfunctioncall1.md | 11 + ...reatechatcompletionrequestfunctioncall2.md | 10 + docs/models/shared/createcompletionrequest.md | 6 +- docs/models/shared/createembeddingrequest.md | 10 +- .../shared/createtranscriptionrequest.md | 2 +- .../models/shared/createtranslationrequest.md | 2 +- docs/sdks/openai/README.md | 274 +++++++++++------- files.gen | 6 + gen.yaml | 10 +- package-lock.json | 4 +- package.json | 2 +- src/internal/utils/retries.ts | 11 +- .../models/shared/chatcompletionfunctions.ts | 29 ++ .../shared/chatcompletionrequestmessage.ts | 40 ++- .../shared/chatcompletionresponsemessage.ts | 34 ++- src/sdk/models/shared/createanswerrequest.ts | 2 +- .../shared/createchatcompletionrequest.ts | 46 ++- .../models/shared/createcompletionrequest.ts 
| 6 +- .../models/shared/createembeddingrequest.ts | 2 +- .../shared/createtranscriptionrequest.ts | 2 +- .../models/shared/createtranslationrequest.ts | 2 +- src/sdk/models/shared/index.ts | 1 + src/sdk/openai.ts | 125 ++++++-- src/sdk/sdk.ts | 6 +- tsconfig.json | 1 + 36 files changed, 558 insertions(+), 210 deletions(-) create mode 100755 docs/models/shared/chatcompletionfunctions.md create mode 100755 docs/models/shared/chatcompletionrequestmessagefunctioncall.md create mode 100755 docs/models/shared/chatcompletionresponsemessagefunctioncall.md create mode 100755 docs/models/shared/createchatcompletionrequestfunctioncall1.md create mode 100755 docs/models/shared/createchatcompletionrequestfunctioncall2.md create mode 100755 src/sdk/models/shared/chatcompletionfunctions.ts diff --git a/README.md b/README.md index c8dbc88..02728f6 100755 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ sdk.openAI.cancelFineTune({ The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). :warning: **Deprecated** -* [createChatCompletion](docs/sdks/openai/README.md#createchatcompletion) - Creates a completion for the chat message +* [createChatCompletion](docs/sdks/openai/README.md#createchatcompletion) - Creates a model response for the given chat conversation. * [~~createClassification~~](docs/sdks/openai/README.md#createclassification) - Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples @@ -78,7 +78,7 @@ are combined with the query to construct a prompt to produce the final label via Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. 
:warning: **Deprecated** -* [createCompletion](docs/sdks/openai/README.md#createcompletion) - Creates a completion for the provided prompt and parameters +* [createCompletion](docs/sdks/openai/README.md#createcompletion) - Creates a completion for the provided prompt and parameters. * [createEdit](docs/sdks/openai/README.md#createedit) - Creates a new edit for the provided input, instruction, and parameters. * [createEmbedding](docs/sdks/openai/README.md#createembedding) - Creates an embedding vector representing the input text. * [createFile](docs/sdks/openai/README.md#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. diff --git a/RELEASES.md b/RELEASES.md index 72579c7..0ebabb8 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -180,4 +180,12 @@ Based on: - OpenAPI Doc 1.2.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.47.1 (2.39.2) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v1.11.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.11.2 - . \ No newline at end of file +- [NPM v1.11.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.11.2 - . + +## 2023-06-14 01:10:14 +### Changes +Based on: +- OpenAPI Doc 1.3.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.47.3 (2.40.1) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v1.12.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.12.0 - . 
\ No newline at end of file diff --git a/docs/models/shared/chatcompletionfunctions.md b/docs/models/shared/chatcompletionfunctions.md new file mode 100755 index 0000000..6902cc1 --- /dev/null +++ b/docs/models/shared/chatcompletionfunctions.md @@ -0,0 +1,10 @@ +# ChatCompletionFunctions + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `description` | *string* | :heavy_minus_sign: | The description of what the function does. | +| `name` | *string* | :heavy_check_mark: | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. | +| `parameters` | Record | :heavy_minus_sign: | The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 
| \ No newline at end of file diff --git a/docs/models/shared/chatcompletionrequestmessage.md b/docs/models/shared/chatcompletionrequestmessage.md index a4e7c3e..35ef172 100755 --- a/docs/models/shared/chatcompletionrequestmessage.md +++ b/docs/models/shared/chatcompletionrequestmessage.md @@ -3,8 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | -| `content` | *string* | :heavy_check_mark: | The contents of the message | -| `name` | *string* | :heavy_minus_sign: | The name of the user in a multi-user chat | -| `role` | [ChatCompletionRequestMessageRole](../../models/shared/chatcompletionrequestmessagerole.md) | :heavy_check_mark: | The role of the author of this message. 
| \ No newline at end of file +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `content` | *string* | :heavy_minus_sign: | The contents of the message. `content` is required for all messages except assistant messages with function calls. | +| `functionCall` | [ChatCompletionRequestMessageFunctionCall](../../models/shared/chatcompletionrequestmessagefunctioncall.md) | :heavy_minus_sign: | The name and arguments of a function that should be called, as generated by the model. | +| `name` | *string* | :heavy_minus_sign: | The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. | +| `role` | [ChatCompletionRequestMessageRole](../../models/shared/chatcompletionrequestmessagerole.md) | :heavy_check_mark: | The role of the messages author. One of `system`, `user`, `assistant`, or `function`. 
| \ No newline at end of file diff --git a/docs/models/shared/chatcompletionrequestmessagefunctioncall.md b/docs/models/shared/chatcompletionrequestmessagefunctioncall.md new file mode 100755 index 0000000..bf1c936 --- /dev/null +++ b/docs/models/shared/chatcompletionrequestmessagefunctioncall.md @@ -0,0 +1,11 @@ +# ChatCompletionRequestMessageFunctionCall + +The name and arguments of a function that should be called, as generated by the model. + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `arguments` | *string* | :heavy_minus_sign: | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. 
| +| `name` | *string* | :heavy_minus_sign: | The name of the function to call. | \ No newline at end of file diff --git a/docs/models/shared/chatcompletionrequestmessagerole.md b/docs/models/shared/chatcompletionrequestmessagerole.md index c594a95..8a1e113 100755 --- a/docs/models/shared/chatcompletionrequestmessagerole.md +++ b/docs/models/shared/chatcompletionrequestmessagerole.md @@ -1,6 +1,6 @@ # ChatCompletionRequestMessageRole -The role of the author of this message. +The role of the messages author. One of `system`, `user`, `assistant`, or `function`. ## Values @@ -9,4 +9,5 @@ The role of the author of this message. | ----------- | ----------- | | `System` | system | | `User` | user | -| `Assistant` | assistant | \ No newline at end of file +| `Assistant` | assistant | +| `Function` | function | \ No newline at end of file diff --git a/docs/models/shared/chatcompletionresponsemessage.md b/docs/models/shared/chatcompletionresponsemessage.md index bbf084b..73d7345 100755 --- a/docs/models/shared/chatcompletionresponsemessage.md +++ b/docs/models/shared/chatcompletionresponsemessage.md @@ -3,7 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | -| `content` | *string* | :heavy_check_mark: | The contents of the message | -| `role` | [ChatCompletionResponseMessageRole](../../models/shared/chatcompletionresponsemessagerole.md) | :heavy_check_mark: | The role of the author of this message. 
| \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `content` | *string* | :heavy_minus_sign: | The contents of the message. | +| `functionCall` | [ChatCompletionResponseMessageFunctionCall](../../models/shared/chatcompletionresponsemessagefunctioncall.md) | :heavy_minus_sign: | The name and arguments of a function that should be called, as generated by the model. | +| `role` | [ChatCompletionResponseMessageRole](../../models/shared/chatcompletionresponsemessagerole.md) | :heavy_check_mark: | The role of the author of this message. | \ No newline at end of file diff --git a/docs/models/shared/chatcompletionresponsemessagefunctioncall.md b/docs/models/shared/chatcompletionresponsemessagefunctioncall.md new file mode 100755 index 0000000..bd9ac54 --- /dev/null +++ b/docs/models/shared/chatcompletionresponsemessagefunctioncall.md @@ -0,0 +1,11 @@ +# ChatCompletionResponseMessageFunctionCall + +The name and arguments of a function that should be called, as generated by the model. 
+ + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `arguments` | *string* | :heavy_minus_sign: | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | +| `name` | *string* | :heavy_minus_sign: | The name of the function to call. | \ No newline at end of file diff --git a/docs/models/shared/chatcompletionresponsemessagerole.md b/docs/models/shared/chatcompletionresponsemessagerole.md index b6e1f69..44c97bf 100755 --- a/docs/models/shared/chatcompletionresponsemessagerole.md +++ b/docs/models/shared/chatcompletionresponsemessagerole.md @@ -9,4 +9,5 @@ The role of the author of this message. 
| ----------- | ----------- | | `System` | system | | `User` | user | -| `Assistant` | assistant | \ No newline at end of file +| `Assistant` | assistant | +| `Function` | function | \ No newline at end of file diff --git a/docs/models/shared/createanswerrequest.md b/docs/models/shared/createanswerrequest.md index 5e0ef20..f8588d6 100755 --- a/docs/models/shared/createanswerrequest.md +++ b/docs/models/shared/createanswerrequest.md @@ -3,23 +3,23 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `documents` | *string*[] | :heavy_minus_sign: | List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples.

You should specify either `documents` or a `file`, but not both.
| | -| `examples` | *string*[][] | :heavy_check_mark: | List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. | | -| `examplesContext` | *string* | :heavy_check_mark: | A text snippet containing the contextual information used to generate the answers for the `examples` you provide. | Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. border. | -| `expand` | *any*[] | :heavy_minus_sign: | If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. | | -| `file` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose.

You should specify either `documents` or a `file`, but not both.
| | -| `logitBias` | *any* | :heavy_minus_sign: | N/A | | -| `logprobs` | *number* | :heavy_minus_sign: | Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.

When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
| | -| `maxRerank` | *number* | :heavy_minus_sign: | The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. | | -| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens allowed for the generated answer | | -| `model` | *string* | :heavy_check_mark: | ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. | | -| `n` | *number* | :heavy_minus_sign: | How many answers to generate for each question. | | -| `question` | *string* | :heavy_check_mark: | Question to get answered. | What is the capital of Japan? | -| `returnMetadata` | *any* | :heavy_minus_sign: | N/A | | -| `returnPrompt` | *boolean* | :heavy_minus_sign: | If set to `true`, the returned JSON will include a "prompt" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. | | -| `searchModel` | *string* | :heavy_minus_sign: | ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. | | -| `stop` | *any* | :heavy_minus_sign: | completions_stop_description | | -| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
| | -| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `documents` | *string*[] | :heavy_minus_sign: | List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples.

You should specify either `documents` or a `file`, but not both.
| | +| `examples` | *string*[][] | :heavy_check_mark: | List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. | | +| `examplesContext` | *string* | :heavy_check_mark: | A text snippet containing the contextual information used to generate the answers for the `examples` you provide. | Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. border. | +| `expand` | *any*[] | :heavy_minus_sign: | If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. | | +| `file` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose.

You should specify either `documents` or a `file`, but not both.
| | +| `logitBias` | *any* | :heavy_minus_sign: | N/A | | +| `logprobs` | *number* | :heavy_minus_sign: | Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5.

When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
| | +| `maxRerank` | *number* | :heavy_minus_sign: | The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. | | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens allowed for the generated answer | | +| `model` | *string* | :heavy_check_mark: | ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. | | +| `n` | *number* | :heavy_minus_sign: | How many answers to generate for each question. | | +| `question` | *string* | :heavy_check_mark: | Question to get answered. | What is the capital of Japan? | +| `returnMetadata` | *any* | :heavy_minus_sign: | N/A | | +| `returnPrompt` | *boolean* | :heavy_minus_sign: | If set to `true`, the returned JSON will include a "prompt" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. | | +| `searchModel` | *string* | :heavy_minus_sign: | ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. | | +| `stop` | *any* | :heavy_minus_sign: | completions_stop_description | | +| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
| | +| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequest.md b/docs/models/shared/createchatcompletionrequest.md index 1f2af27..a608083 100755 --- a/docs/models/shared/createchatcompletionrequest.md +++ b/docs/models/shared/createchatcompletionrequest.md @@ -6,14 +6,16 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `frequencyPenalty` | *number* | :heavy_minus_sign: | completions_frequency_penalty_description | | +| `functionCall` | *any* | :heavy_minus_sign: | Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. | | +| `functions` | [ChatCompletionFunctions](../../models/shared/chatcompletionfunctions.md)[] | :heavy_minus_sign: | A list of functions the model may generate JSON inputs for. | | | `logitBias` | [CreateChatCompletionRequestLogitBias](../../models/shared/createchatcompletionrequestlogitbias.md) | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
| | -| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
| | -| `messages` | [ChatCompletionRequestMessage](../../models/shared/chatcompletionrequestmessage.md)[] | :heavy_check_mark: | The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction). | | -| `model` | *string* | :heavy_check_mark: | ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported. | | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the chat completion.

The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| | +| `messages` | [ChatCompletionRequestMessage](../../models/shared/chatcompletionrequestmessage.md)[] | :heavy_check_mark: | A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | | +| `model` | *string* | :heavy_check_mark: | ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. | | | `n` | *number* | :heavy_minus_sign: | How many chat completion choices to generate for each input message. | 1 | | `presencePenalty` | *number* | :heavy_minus_sign: | completions_presence_penalty_description | | | `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens.
| | -| `stream` | *boolean* | :heavy_minus_sign: | If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
| | +| `stream` | *boolean* | :heavy_minus_sign: | If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
| | | `temperature` | *number* | :heavy_minus_sign: | completions_temperature_description | 1 | | `topP` | *number* | :heavy_minus_sign: | completions_top_p_description | 1 | | `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequestfunctioncall1.md b/docs/models/shared/createchatcompletionrequestfunctioncall1.md new file mode 100755 index 0000000..43e51c8 --- /dev/null +++ b/docs/models/shared/createchatcompletionrequestfunctioncall1.md @@ -0,0 +1,11 @@ +# CreateChatCompletionRequestFunctionCall1 + +Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + + +## Values + +| Name | Value | +| ------ | ------ | +| `None` | none | +| `Auto` | auto | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequestfunctioncall2.md b/docs/models/shared/createchatcompletionrequestfunctioncall2.md new file mode 100755 index 0000000..0ab17eb --- /dev/null +++ b/docs/models/shared/createchatcompletionrequestfunctioncall2.md @@ -0,0 +1,10 @@ +# CreateChatCompletionRequestFunctionCall2 + +Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. 
+ + +## Fields + +| Field | Type | Required | Description | +| --------------------------------- | --------------------------------- | --------------------------------- | --------------------------------- | +| `name` | *string* | :heavy_check_mark: | The name of the function to call. | \ No newline at end of file diff --git a/docs/models/shared/createcompletionrequest.md b/docs/models/shared/createcompletionrequest.md index 0554439..d29a9d8 100755 --- a/docs/models/shared/createcompletionrequest.md +++ b/docs/models/shared/createcompletionrequest.md @@ -9,14 +9,14 @@ | `echo` | *boolean* | :heavy_minus_sign: | Echo back the prompt in addition to the completion
| | | `frequencyPenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | | `logitBias` | [CreateCompletionRequestLogitBias](../../models/shared/createcompletionrequestlogitbias.md) | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.

As an example, you can pass `{"50256": -100}` to prevent the <\|endoftext\|> token from being generated.
| | -| `logprobs` | *number* | :heavy_minus_sign: | Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case.
| | -| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the completion.

The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
| 16 | +| `logprobs` | *number* | :heavy_minus_sign: | Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5.
| | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the completion.

The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| 16 | | `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. | | | `n` | *number* | :heavy_minus_sign: | How many completions to generate for each prompt.

**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
| 1 | | `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | | `prompt` | *any* | :heavy_minus_sign: | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.

Note that <\|endoftext\|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
| | | `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
| | -| `stream` | *boolean* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
| | +| `stream` | *boolean* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
| | | `suffix` | *string* | :heavy_minus_sign: | The suffix that comes after a completion of inserted text. | test. | | `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

We generally recommend altering this or `top_p` but not both.
| 1 | | `topP` | *number* | :heavy_minus_sign: | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.

We generally recommend altering this or `temperature` but not both.
| 1 | diff --git a/docs/models/shared/createembeddingrequest.md b/docs/models/shared/createembeddingrequest.md index 2545fe4..9ee2940 100755 --- a/docs/models/shared/createembeddingrequest.md +++ b/docs/models/shared/createembeddingrequest.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `input` | *any* | :heavy_check_mark: | Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length.
| -| `model` | *any* | :heavy_check_mark: | N/A | -| `user` | *any* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input` | *any* | :heavy_check_mark: | Input text to embed, encoded as a string or array of tokens. 
To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| +| `model` | *any* | :heavy_check_mark: | N/A | +| `user` | *any* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequest.md b/docs/models/shared/createtranscriptionrequest.md index 1e72ffa..9249593 100755 --- a/docs/models/shared/createtranscriptionrequest.md +++ b/docs/models/shared/createtranscriptionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `file` | [CreateTranscriptionRequestFile](../../models/shared/createtranscriptionrequestfile.md) | :heavy_check_mark: | The audio file to transcribe, in one of 
these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
| +| `file` | [CreateTranscriptionRequestFile](../../models/shared/createtranscriptionrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
| | `language` | *string* | :heavy_minus_sign: | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
| | `model` | *string* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| | `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
| diff --git a/docs/models/shared/createtranslationrequest.md b/docs/models/shared/createtranslationrequest.md index 322e61e..07dc70a 100755 --- a/docs/models/shared/createtranslationrequest.md +++ b/docs/models/shared/createtranslationrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `file` | [CreateTranslationRequestFile](../../models/shared/createtranslationrequestfile.md) | :heavy_check_mark: | The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
| +| `file` | [CreateTranslationRequestFile](../../models/shared/createtranslationrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
| | `model` | *string* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| | `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
| | `responseFormat` | *string* | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index 58244b9..e5c5bea 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -12,7 +12,7 @@ The OpenAI REST API The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). :warning: **Deprecated** -* [createChatCompletion](#createchatcompletion) - Creates a completion for the chat message +* [createChatCompletion](#createchatcompletion) - Creates a model response for the given chat conversation. * [~~createClassification~~](#createclassification) - Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples @@ -23,7 +23,7 @@ are combined with the query to construct a prompt to produce the final label via Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases. :warning: **Deprecated** -* [createCompletion](#createcompletion) - Creates a completion for the provided prompt and parameters +* [createCompletion](#createcompletion) - Creates a completion for the provided prompt and parameters. * [createEdit](#createedit) - Creates a new edit for the provided input, instruction, and parameters. * [createEmbedding](#createembedding) - Creates an embedding vector representing the input text. * [createFile](#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. 
@@ -184,38 +184,111 @@ sdk.openAI.createAnswer({ ## createChatCompletion -Creates a completion for the chat message +Creates a model response for the given chat conversation. ### Example Usage ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateChatCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { ChatCompletionRequestMessageRole, ChatCompletionResponseMessageRole } from "@speakeasy-api/openai/dist/sdk/models/shared"; +import { + ChatCompletionRequestMessageRole, + ChatCompletionResponseMessageRole, + CreateChatCompletionRequestFunctionCall1, +} from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); sdk.openAI.createChatCompletion({ frequencyPenalty: 9571.56, + functionCall: { + name: "Teri Strosin", + }, + functions: [ + { + description: "quod", + name: "Deanna Sauer MD", + parameters: { + "occaecati": "fugit", + "deleniti": "hic", + "optio": "totam", + }, + }, + { + description: "beatae", + name: "Tanya Gleason", + parameters: { + "esse": "ipsum", + "excepturi": "aspernatur", + "perferendis": "ad", + }, + }, + { + description: "natus", + name: "Sheryl Fadel", + parameters: { + "saepe": "fuga", + "in": "corporis", + "iste": "iure", + "saepe": "quidem", + }, + }, + { + description: "architecto", + name: "Lela Orn", + parameters: { + "dolorem": "corporis", + }, + }, + ], logitBias: {}, - maxTokens: 778157, + maxTokens: 128926, messages: [ { - content: "at", - name: "Emilio Krajcik", - role: ChatCompletionRequestMessageRole.User, + content: "enim", + functionCall: { + arguments: "omnis", + name: "Ms. 
Cathy Marks", + }, + name: "Darrin Brakus", + role: ChatCompletionRequestMessageRole.Assistant, + }, + { + content: "consequuntur", + functionCall: { + arguments: "repellat", + name: "Tracy Fritsch", + }, + name: "Shannon Mueller", + role: ChatCompletionRequestMessageRole.System, + }, + { + content: "laborum", + functionCall: { + arguments: "animi", + name: "Christina Satterfield", + }, + name: "Mr. Alberta Schuster", + role: ChatCompletionRequestMessageRole.Function, + }, + { + content: "laborum", + functionCall: { + arguments: "quasi", + name: "Jan Thiel", + }, + name: "Jose Moen", + role: ChatCompletionRequestMessageRole.System, }, ], - model: "totam", + model: "doloremque", n: 1, - presencePenalty: 7805.29, - stop: [ - "nam", - ], + presencePenalty: 4417.11, + stop: "maiores", stream: false, temperature: 1, topP: 1, - user: "officia", + user: "dicta", }).then((res: CreateChatCompletionResponse) => { if (res.statusCode == 200) { // handle response @@ -262,35 +335,30 @@ const sdk = new Gpt(); sdk.openAI.createClassification({ examples: [ [ - "deleniti", + "iusto", + "dicta", ], [ - "optio", - "totam", - "beatae", + "enim", + "accusamus", "commodi", ], - [ - "modi", - "qui", - ], ], - expand: "impedit", - file: "cum", + expand: "repudiandae", + file: "quae", labels: [ - "ipsum", - "excepturi", + "quidem", ], - logitBias: "aspernatur", - logprobs: "perferendis", - maxExamples: 324141, - model: "natus", + logitBias: "molestias", + logprobs: "excepturi", + maxExamples: 865103, + model: "modi", query: "The plot is not very attractive.", - returnMetadata: "sed", - returnPrompt: "iste", - searchModel: "dolor", + returnMetadata: "praesentium", + returnPrompt: "rem", + searchModel: "voluptates", temperature: 0, - user: "natus", + user: "quasi", }).then((res: CreateClassificationResponse) => { if (res.statusCode == 200) { // handle response @@ -313,7 +381,7 @@ sdk.openAI.createClassification({ ## createCompletion -Creates a completion for the provided prompt and parameters 
+Creates a completion for the provided prompt and parameters. ### Example Usage @@ -324,22 +392,24 @@ import { CreateCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/ const sdk = new Gpt(); sdk.openAI.createCompletion({ - bestOf: 386489, + bestOf: 921158, echo: false, - frequencyPenalty: 9437.49, + frequencyPenalty: 5759.47, logitBias: {}, - logprobs: 902599, + logprobs: 83112, maxTokens: 16, - model: "fuga", + model: "itaque", n: 1, - presencePenalty: 4499.5, + presencePenalty: 2777.18, prompt: [ "This is a test.", - "This is a test.", - "This is a test.", ], - stop: " -", + stop: [ + "["\n"]", + "["\n"]", + "["\n"]", + "["\n"]", + ], stream: false, suffix: "test.", temperature: 1, @@ -380,7 +450,7 @@ const sdk = new Gpt(); sdk.openAI.createEdit({ input: "What day of the wek is it?", instruction: "Fix the spelling mistakes.", - model: "saepe", + model: "explicabo", n: 1, temperature: 1, topP: 1, @@ -418,10 +488,12 @@ const sdk = new Gpt(); sdk.openAI.createEmbedding({ input: [ - 60225, + 841386, + 289406, + 264730, ], - model: "reiciendis", - user: "est", + model: "qui", + user: "aliquid", }).then((res: CreateEmbeddingResponse) => { if (res.statusCode == 200) { // handle response @@ -457,10 +529,10 @@ const sdk = new Gpt(); sdk.openAI.createFile({ file: { - content: "mollitia".encode(), - file: "laborum", + content: "cupiditate".encode(), + file: "quos", }, - purpose: "dolores", + purpose: "perferendis", }).then((res: CreateFileResponse) => { if (res.statusCode == 200) { // handle response @@ -499,19 +571,21 @@ import { CreateFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/op const sdk = new Gpt(); sdk.openAI.createFineTune({ - batchSize: 210382, + batchSize: 164940, classificationBetas: [ - 1289.26, - 7506.86, + 3698.08, + 46.95, + 1464.41, + 6778.17, ], - classificationNClasses: 315428, - classificationPositiveClass: "omnis", + classificationNClasses: 569618, + classificationPositiveClass: "tempora", computeClassificationMetrics: 
false, - learningRateMultiplier: 3637.11, - model: "minima", - nEpochs: 570197, - promptLossWeight: 384.25, - suffix: "iure", + learningRateMultiplier: 7037.37, + model: "tempore", + nEpochs: 288476, + promptLossWeight: 9621.89, + suffix: "eum", trainingFile: "file-ajSREls59WBbvgSzJSVWxMCB", validationFile: "file-XjSREls59WBbvgSzJSVWxMCa", }).then((res: CreateFineTuneResponse) => { @@ -552,7 +626,7 @@ sdk.openAI.createImage({ prompt: "A cute baby sea otter", responseFormat: CreateImageRequestResponseFormat.Url, size: CreateImageRequestSize.OneThousandAndTwentyFourx1024, - user: "culpa", + user: "non", }).then((res: CreateImageResponse) => { if (res.statusCode == 200) { // handle response @@ -587,18 +661,18 @@ const sdk = new Gpt(); sdk.openAI.createImageEdit({ image: { - content: "doloribus".encode(), - image: "sapiente", + content: "eligendi".encode(), + image: "sint", }, mask: { - content: "architecto".encode(), - mask: "mollitia", + content: "aliquid".encode(), + mask: "provident", }, - n: "dolorem", + n: "necessitatibus", prompt: "A cute baby sea otter wearing a beret", - responseFormat: "culpa", - size: "consequuntur", - user: "repellat", + responseFormat: "sint", + size: "officia", + user: "dolor", }).then((res: CreateImageEditResponse) => { if (res.statusCode == 200) { // handle response @@ -633,13 +707,13 @@ const sdk = new Gpt(); sdk.openAI.createImageVariation({ image: { - content: "mollitia".encode(), - image: "occaecati", + content: "debitis".encode(), + image: "a", }, - n: "numquam", - responseFormat: "commodi", - size: "quam", - user: "molestiae", + n: "dolorum", + responseFormat: "in", + size: "in", + user: "illum", }).then((res: CreateImageVariationResponse) => { if (res.statusCode == 200) { // handle response @@ -673,7 +747,11 @@ import { CreateModerationResponse } from "@speakeasy-api/openai/dist/sdk/models/ const sdk = new Gpt(); sdk.openAI.createModeration({ - input: "I want to kill them.", + input: [ + "I want to kill them.", + "I want to kill 
them.", + "I want to kill them.", + ], model: "text-moderation-stable", }).then((res: CreateModerationResponse) => { if (res.statusCode == 200) { @@ -717,15 +795,13 @@ const sdk = new Gpt(); sdk.openAI.createSearch({ createSearchRequest: { documents: [ - "quia", - "quis", - "vitae", + "magnam", ], - file: "laborum", - maxRerank: 656330, + file: "cumque", + maxRerank: 813798, query: "the president", returnMetadata: false, - user: "enim", + user: "ea", }, engineId: "davinci", }).then((res: CreateSearchResponse) => { @@ -762,14 +838,14 @@ const sdk = new Gpt(); sdk.openAI.createTranscription({ file: { - content: "odit".encode(), - file: "quo", + content: "aliquid".encode(), + file: "laborum", }, - language: "sequi", - model: "tenetur", - prompt: "ipsam", - responseFormat: "id", - temperature: 8209.94, + language: "accusamus", + model: "non", + prompt: "occaecati", + responseFormat: "enim", + temperature: 8817.36, }).then((res: CreateTranscriptionResponse) => { if (res.statusCode == 200) { // handle response @@ -804,13 +880,13 @@ const sdk = new Gpt(); sdk.openAI.createTranslation({ file: { - content: "aut".encode(), - file: "quasi", + content: "delectus".encode(), + file: "quidem", }, - model: "error", - prompt: "temporibus", - responseFormat: "laborum", - temperature: 960.98, + model: "provident", + prompt: "nam", + responseFormat: "id", + temperature: 5013.24, }).then((res: CreateTranslationResponse) => { if (res.statusCode == 200) { // handle response @@ -844,7 +920,7 @@ import { DeleteFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operat const sdk = new Gpt(); sdk.openAI.deleteFile({ - fileId: "reiciendis", + fileId: "deleniti", }).then((res: DeleteFileResponse) => { if (res.statusCode == 200) { // handle response @@ -912,7 +988,7 @@ import { DownloadFileResponse } from "@speakeasy-api/openai/dist/sdk/models/oper const sdk = new Gpt(); sdk.openAI.downloadFile({ - fileId: "voluptatibus", + fileId: "sapiente", }).then((res: DownloadFileResponse) => { if 
(res.statusCode == 200) { // handle response @@ -1145,7 +1221,7 @@ import { RetrieveFileResponse } from "@speakeasy-api/openai/dist/sdk/models/oper const sdk = new Gpt(); sdk.openAI.retrieveFile({ - fileId: "vero", + fileId: "amet", }).then((res: RetrieveFileResponse) => { if (res.statusCode == 200) { // handle response diff --git a/files.gen b/files.gen index f063f51..fe5f033 100755 --- a/files.gen +++ b/files.gen @@ -57,6 +57,7 @@ src/sdk/models/shared/createchatcompletionresponse.ts src/sdk/models/shared/chatcompletionresponsemessage.ts src/sdk/models/shared/createchatcompletionrequest.ts src/sdk/models/shared/chatcompletionrequestmessage.ts +src/sdk/models/shared/chatcompletionfunctions.ts src/sdk/models/shared/createclassificationresponse.ts src/sdk/models/shared/createclassificationrequest.ts src/sdk/models/shared/createcompletionresponse.ts @@ -141,12 +142,17 @@ docs/models/shared/createanswerrequest.md docs/models/shared/createchatcompletionresponsechoices.md docs/models/shared/createchatcompletionresponseusage.md docs/models/shared/createchatcompletionresponse.md +docs/models/shared/chatcompletionresponsemessagefunctioncall.md docs/models/shared/chatcompletionresponsemessagerole.md docs/models/shared/chatcompletionresponsemessage.md +docs/models/shared/createchatcompletionrequestfunctioncall2.md +docs/models/shared/createchatcompletionrequestfunctioncall1.md docs/models/shared/createchatcompletionrequestlogitbias.md docs/models/shared/createchatcompletionrequest.md +docs/models/shared/chatcompletionrequestmessagefunctioncall.md docs/models/shared/chatcompletionrequestmessagerole.md docs/models/shared/chatcompletionrequestmessage.md +docs/models/shared/chatcompletionfunctions.md docs/models/shared/createclassificationresponseselectedexamples.md docs/models/shared/createclassificationresponse.md docs/models/shared/createclassificationrequest.md diff --git a/gen.yaml b/gen.yaml index aba3849..b144372 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,16 +1,16 @@ 
configVersion: 1.0.0 management: - docChecksum: deeec14f237b7f3737373e14747b7a04 - docVersion: 1.2.0 - speakeasyVersion: 1.47.1 - generationVersion: 2.39.2 + docChecksum: e3499fc1d954655713f996bf9459a51c + docVersion: 1.3.0 + speakeasyVersion: 1.47.3 + generationVersion: 2.40.1 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 1.11.2 + version: 1.12.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index d63fde8..af28ea9 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "1.11.2", + "version": "1.12.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "1.11.2", + "version": "1.12.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 098a51d..1bb2bc5 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "1.11.2", + "version": "1.12.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/internal/utils/retries.ts b/src/internal/utils/retries.ts index 6c77b95..bc9d5e3 100755 --- a/src/internal/utils/retries.ts +++ b/src/internal/utils/retries.ts @@ -28,8 +28,13 @@ export class RetryConfig { backoff?: BackoffStrategy; retryConnectionErrors: boolean; - constructor(strategy: string, retryConnectionErrors = true) { + constructor( + strategy: string, + backoff?: BackoffStrategy, + retryConnectionErrors = true + ) { this.strategy = strategy; + this.backoff = backoff; this.retryConnectionErrors = retryConnectionErrors; } } @@ -50,6 +55,8 @@ class PermanentError extends Error { constructor(inner: unknown) { super("Permanent error"); this.inner = inner; + + Object.setPrototypeOf(this, PermanentError.prototype); } } @@ -59,6 +66,8 @@ class 
TemporaryError extends Error { constructor(res: AxiosResponse) { super("Temporary error"); this.res = res; + + Object.setPrototypeOf(this, TemporaryError.prototype); } } diff --git a/src/sdk/models/shared/chatcompletionfunctions.ts b/src/sdk/models/shared/chatcompletionfunctions.ts new file mode 100755 index 0000000..635426f --- /dev/null +++ b/src/sdk/models/shared/chatcompletionfunctions.ts @@ -0,0 +1,29 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose } from "class-transformer"; + +export class ChatCompletionFunctions extends SpeakeasyBase { + /** + * The description of what the function does. + */ + @SpeakeasyMetadata() + @Expose({ name: "description" }) + description?: string; + + /** + * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + */ + @SpeakeasyMetadata() + @Expose({ name: "name" }) + name: string; + + /** + * The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + */ + @SpeakeasyMetadata() + @Expose({ name: "parameters" }) + parameters?: Record; +} diff --git a/src/sdk/models/shared/chatcompletionrequestmessage.ts b/src/sdk/models/shared/chatcompletionrequestmessage.ts index 789ef5a..16ebe4c 100755 --- a/src/sdk/models/shared/chatcompletionrequestmessage.ts +++ b/src/sdk/models/shared/chatcompletionrequestmessage.ts @@ -3,34 +3,62 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; +import { Expose, Type } from "class-transformer"; /** - * The role of the author of this message. + * The name and arguments of a function that should be called, as generated by the model. 
+ */ +export class ChatCompletionRequestMessageFunctionCall extends SpeakeasyBase { + /** + * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + */ + @SpeakeasyMetadata() + @Expose({ name: "arguments" }) + arguments?: string; + + /** + * The name of the function to call. + */ + @SpeakeasyMetadata() + @Expose({ name: "name" }) + name?: string; +} + +/** + * The role of the messages author. One of `system`, `user`, `assistant`, or `function`. */ export enum ChatCompletionRequestMessageRole { System = "system", User = "user", Assistant = "assistant", + Function = "function", } export class ChatCompletionRequestMessage extends SpeakeasyBase { /** - * The contents of the message + * The contents of the message. `content` is required for all messages except assistant messages with function calls. */ @SpeakeasyMetadata() @Expose({ name: "content" }) - content: string; + content?: string; + + /** + * The name and arguments of a function that should be called, as generated by the model. + */ + @SpeakeasyMetadata() + @Expose({ name: "function_call" }) + @Type(() => ChatCompletionRequestMessageFunctionCall) + functionCall?: ChatCompletionRequestMessageFunctionCall; /** - * The name of the user in a multi-user chat + * The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. */ @SpeakeasyMetadata() @Expose({ name: "name" }) name?: string; /** - * The role of the author of this message. + * The role of the messages author. One of `system`, `user`, `assistant`, or `function`. 
*/ @SpeakeasyMetadata() @Expose({ name: "role" }) diff --git a/src/sdk/models/shared/chatcompletionresponsemessage.ts b/src/sdk/models/shared/chatcompletionresponsemessage.ts index 1643401..74139ec 100755 --- a/src/sdk/models/shared/chatcompletionresponsemessage.ts +++ b/src/sdk/models/shared/chatcompletionresponsemessage.ts @@ -3,7 +3,26 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; +import { Expose, Type } from "class-transformer"; + +/** + * The name and arguments of a function that should be called, as generated by the model. + */ +export class ChatCompletionResponseMessageFunctionCall extends SpeakeasyBase { + /** + * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + */ + @SpeakeasyMetadata() + @Expose({ name: "arguments" }) + arguments?: string; + + /** + * The name of the function to call. + */ + @SpeakeasyMetadata() + @Expose({ name: "name" }) + name?: string; +} /** * The role of the author of this message. @@ -12,15 +31,24 @@ export enum ChatCompletionResponseMessageRole { System = "system", User = "user", Assistant = "assistant", + Function = "function", } export class ChatCompletionResponseMessage extends SpeakeasyBase { /** - * The contents of the message + * The contents of the message. */ @SpeakeasyMetadata() @Expose({ name: "content" }) - content: string; + content?: string; + + /** + * The name and arguments of a function that should be called, as generated by the model. + */ + @SpeakeasyMetadata() + @Expose({ name: "function_call" }) + @Type(() => ChatCompletionResponseMessageFunctionCall) + functionCall?: ChatCompletionResponseMessageFunctionCall; /** * The role of the author of this message. 
diff --git a/src/sdk/models/shared/createanswerrequest.ts b/src/sdk/models/shared/createanswerrequest.ts index b91faee..bb0037a 100755 --- a/src/sdk/models/shared/createanswerrequest.ts +++ b/src/sdk/models/shared/createanswerrequest.ts @@ -60,7 +60,7 @@ export class CreateAnswerRequest extends SpeakeasyBase { * * @remarks * - * The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. + * The maximum value for `logprobs` is 5. * * When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs. * diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts index edb1ad0..e350233 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -3,9 +3,30 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { ChatCompletionFunctions } from "./chatcompletionfunctions"; import { ChatCompletionRequestMessage } from "./chatcompletionrequestmessage"; import { Expose, Type } from "class-transformer"; +/** + * Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + */ +export class CreateChatCompletionRequestFunctionCall2 extends SpeakeasyBase { + /** + * The name of the function to call. + */ + @SpeakeasyMetadata() + @Expose({ name: "name" }) + name: string; +} + +/** + * Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. 
"auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + */ +export enum CreateChatCompletionRequestFunctionCall1 { + None = "none", + Auto = "auto", +} + /** * Modify the likelihood of specified tokens appearing in the completion. * @@ -24,6 +45,21 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { @Expose({ name: "frequency_penalty" }) frequencyPenalty?: number; + /** + * Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + */ + @SpeakeasyMetadata() + @Expose({ name: "function_call" }) + functionCall?: any; + + /** + * A list of functions the model may generate JSON inputs for. + */ + @SpeakeasyMetadata({ elemType: ChatCompletionFunctions }) + @Expose({ name: "functions" }) + @Type(() => ChatCompletionFunctions) + functions?: ChatCompletionFunctions[]; + /** * Modify the likelihood of specified tokens appearing in the completion. * @@ -38,17 +74,19 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { logitBias?: CreateChatCompletionRequestLogitBias; /** - * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). + * The maximum number of [tokens](/tokenizer) to generate in the chat completion. * * @remarks * + * The total length of input tokens and generated tokens is limited by the model's context length. 
[Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. + * */ @SpeakeasyMetadata() @Expose({ name: "max_tokens" }) maxTokens?: number; /** - * The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction). + * A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). */ @SpeakeasyMetadata({ elemType: ChatCompletionRequestMessage }) @Expose({ name: "messages" }) @@ -56,7 +94,7 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { messages: ChatCompletionRequestMessage[]; /** - * ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported. + * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. */ @SpeakeasyMetadata() @Expose({ name: "model" }) @@ -87,7 +125,7 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { stop?: any; /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). 
* * @remarks * diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts index e313914..07e939c 100755 --- a/src/sdk/models/shared/createcompletionrequest.ts +++ b/src/sdk/models/shared/createcompletionrequest.ts @@ -74,7 +74,7 @@ export class CreateCompletionRequest extends SpeakeasyBase { * * @remarks * - * The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. + * The maximum value for `logprobs` is 5. * */ @SpeakeasyMetadata() @@ -86,7 +86,7 @@ export class CreateCompletionRequest extends SpeakeasyBase { * * @remarks * - * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096). + * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. * */ @SpeakeasyMetadata() @@ -147,7 +147,7 @@ export class CreateCompletionRequest extends SpeakeasyBase { stop?: any; /** - * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. + * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). 
* * @remarks * diff --git a/src/sdk/models/shared/createembeddingrequest.ts b/src/sdk/models/shared/createembeddingrequest.ts index f42371a..8c25804 100755 --- a/src/sdk/models/shared/createembeddingrequest.ts +++ b/src/sdk/models/shared/createembeddingrequest.ts @@ -7,7 +7,7 @@ import { Expose } from "class-transformer"; export class CreateEmbeddingRequest extends SpeakeasyBase { /** - * Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length. + * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. * * @remarks * diff --git a/src/sdk/models/shared/createtranscriptionrequest.ts b/src/sdk/models/shared/createtranscriptionrequest.ts index feb1ac4..daf233c 100755 --- a/src/sdk/models/shared/createtranscriptionrequest.ts +++ b/src/sdk/models/shared/createtranscriptionrequest.ts @@ -14,7 +14,7 @@ export class CreateTranscriptionRequestFile extends SpeakeasyBase { export class CreateTranscriptionRequest extends SpeakeasyBase { /** - * The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. + * The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. 
* * @remarks * diff --git a/src/sdk/models/shared/createtranslationrequest.ts b/src/sdk/models/shared/createtranslationrequest.ts index 19c555c..7c7dccd 100755 --- a/src/sdk/models/shared/createtranslationrequest.ts +++ b/src/sdk/models/shared/createtranslationrequest.ts @@ -14,7 +14,7 @@ export class CreateTranslationRequestFile extends SpeakeasyBase { export class CreateTranslationRequest extends SpeakeasyBase { /** - * The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. + * The audio file object (not file name) translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. * * @remarks * diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index e70595e..0342142 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -2,6 +2,7 @@ * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. */ +export * from "./chatcompletionfunctions"; export * from "./chatcompletionrequestmessage"; export * from "./chatcompletionresponsemessage"; export * from "./createanswerrequest"; diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index 5a8a4ef..f3daa91 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -49,6 +49,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -63,10 +64,11 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { - res.fineTune = utils.objectToClass(httpRes?.data, shared.FineTune); + res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); } break; } @@ -121,6 +123,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -136,11 +139,12 @@ export class OpenAI { contentType: 
contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.createAnswerResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.CreateAnswerResponse ); } @@ -151,7 +155,7 @@ export class OpenAI { } /** - * Creates a completion for the chat message + * Creates a model response for the given chat conversation. */ async createChatCompletion( req: shared.CreateChatCompletionRequest, @@ -192,6 +196,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -208,11 +213,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.createChatCompletionResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.CreateChatCompletionResponse ); } @@ -275,6 +281,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -291,11 +298,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.createClassificationResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.CreateClassificationResponse ); } @@ -306,7 +314,7 @@ export class OpenAI { } /** - * Creates a completion for the provided prompt and parameters + * Creates a completion for the provided prompt and parameters. 
*/ async createCompletion( req: shared.CreateCompletionRequest, @@ -347,6 +355,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -362,11 +371,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.createCompletionResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.CreateCompletionResponse ); } @@ -418,6 +428,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -433,11 +444,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.createEditResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.CreateEditResponse ); } @@ -489,6 +501,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -504,11 +517,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.createEmbeddingResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.CreateEmbeddingResponse ); } @@ -561,6 +575,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -576,10 +591,11 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch 
(true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { - res.openAIFile = utils.objectToClass(httpRes?.data, shared.OpenAIFile); + res.openAIFile = utils.objectToClass(JSON.parse(decodedRes), shared.OpenAIFile); } break; } @@ -634,6 +650,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -649,10 +666,11 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { - res.fineTune = utils.objectToClass(httpRes?.data, shared.FineTune); + res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); } break; } @@ -702,6 +720,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -717,10 +736,14 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { - res.imagesResponse = utils.objectToClass(httpRes?.data, shared.ImagesResponse); + res.imagesResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ImagesResponse + ); } break; } @@ -770,6 +793,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -785,10 +809,14 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { - res.imagesResponse = utils.objectToClass(httpRes?.data, shared.ImagesResponse); + res.imagesResponse = utils.objectToClass( + 
JSON.parse(decodedRes), + shared.ImagesResponse + ); } break; } @@ -838,6 +866,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -854,10 +883,14 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { - res.imagesResponse = utils.objectToClass(httpRes?.data, shared.ImagesResponse); + res.imagesResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ImagesResponse + ); } break; } @@ -907,6 +940,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -922,11 +956,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.createModerationResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.CreateModerationResponse ); } @@ -989,6 +1024,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -1004,11 +1040,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.createSearchResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.CreateSearchResponse ); } @@ -1060,6 +1097,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -1076,11 +1114,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, 
}); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.createTranscriptionResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.CreateTranscriptionResponse ); } @@ -1132,6 +1171,7 @@ export class OpenAI { url: url, method: "post", headers: headers, + responseType: "arraybuffer", data: reqBody, ...config, }); @@ -1147,11 +1187,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.createTranslationResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.CreateTranslationResponse ); } @@ -1191,6 +1232,7 @@ export class OpenAI { url: url, method: "delete", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -1205,11 +1247,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.deleteFileResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.DeleteFileResponse ); } @@ -1249,6 +1292,7 @@ export class OpenAI { url: url, method: "delete", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -1263,11 +1307,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.deleteModelResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.DeleteModelResponse ); } @@ -1307,6 +1352,7 @@ export class OpenAI { url: url, method: "get", headers: headers, 
+ responseType: "arraybuffer", ...config, }); @@ -1321,10 +1367,11 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { - res.downloadFile200ApplicationJSONString = JSON.stringify(httpRes?.data); + res.downloadFile200ApplicationJSONString = decodedRes; } break; } @@ -1357,6 +1404,7 @@ export class OpenAI { url: url, method: "get", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -1371,11 +1419,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.listEnginesResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.ListEnginesResponse ); } @@ -1408,6 +1457,7 @@ export class OpenAI { url: url, method: "get", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -1422,11 +1472,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.listFilesResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.ListFilesResponse ); } @@ -1468,6 +1519,7 @@ export class OpenAI { url: url + queryParams, method: "get", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -1483,11 +1535,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.listFineTuneEventsResponse = utils.objectToClass( - httpRes?.data, + 
JSON.parse(decodedRes), shared.ListFineTuneEventsResponse ); } @@ -1521,6 +1574,7 @@ export class OpenAI { url: url, method: "get", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -1535,11 +1589,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.listFineTunesResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.ListFineTunesResponse ); } @@ -1572,6 +1627,7 @@ export class OpenAI { url: url, method: "get", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -1586,11 +1642,12 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.listModelsResponse = utils.objectToClass( - httpRes?.data, + JSON.parse(decodedRes), shared.ListModelsResponse ); } @@ -1632,6 +1689,7 @@ export class OpenAI { url: url, method: "get", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -1646,10 +1704,11 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { - res.engine = utils.objectToClass(httpRes?.data, shared.Engine); + res.engine = utils.objectToClass(JSON.parse(decodedRes), shared.Engine); } break; } @@ -1687,6 +1746,7 @@ export class OpenAI { url: url, method: "get", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -1701,10 +1761,11 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 
200: if (utils.matchContentType(contentType, `application/json`)) { - res.openAIFile = utils.objectToClass(httpRes?.data, shared.OpenAIFile); + res.openAIFile = utils.objectToClass(JSON.parse(decodedRes), shared.OpenAIFile); } break; } @@ -1745,6 +1806,7 @@ export class OpenAI { url: url, method: "get", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -1759,10 +1821,11 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { - res.fineTune = utils.objectToClass(httpRes?.data, shared.FineTune); + res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); } break; } @@ -1800,6 +1863,7 @@ export class OpenAI { url: url, method: "get", headers: headers, + responseType: "arraybuffer", ...config, }); @@ -1814,10 +1878,11 @@ export class OpenAI { contentType: contentType, rawResponse: httpRes, }); + const decodedRes = new TextDecoder().decode(httpRes?.data); switch (true) { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { - res.model = utils.objectToClass(httpRes?.data, shared.Model); + res.model = utils.objectToClass(JSON.parse(decodedRes), shared.Model); } break; } diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index cc9cc46..6db3e4b 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -37,9 +37,9 @@ export class SDKConfiguration { serverURL: string; serverDefaults: any; language = "typescript"; - openapiDocVersion = "1.2.0"; - sdkVersion = "1.11.2"; - genVersion = "2.39.2"; + openapiDocVersion = "1.3.0"; + sdkVersion = "1.12.0"; + genVersion = "2.40.1"; public constructor(init?: Partial) { Object.assign(this, init); diff --git a/tsconfig.json b/tsconfig.json index 0f5face..0e90e5e 100755 --- a/tsconfig.json +++ b/tsconfig.json @@ -6,6 +6,7 @@ "rootDir": "src", "outDir": "dist", "allowJs": true, + 
"downlevelIteration": true, "skipLibCheck": true, "esModuleInterop": true, "allowSyntheticDefaultImports": true, From 8a8b611d5abecb5115f9b094b768c321d2189f5e Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 15 Jun 2023 01:11:10 +0000 Subject: [PATCH 06/66] ci: regenerated with OpenAPI Doc 1.3.0, Speakeay CLI 1.47.4 --- RELEASES.md | 10 +++++++++- docs/models/shared/createcompletionrequest.md | 2 +- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/models/shared/createcompletionrequest.ts | 2 +- src/sdk/sdk.ts | 2 +- 7 files changed, 18 insertions(+), 10 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 0ebabb8..9030251 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -188,4 +188,12 @@ Based on: - OpenAPI Doc 1.3.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.47.3 (2.40.1) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v1.12.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.12.0 - . \ No newline at end of file +- [NPM v1.12.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.12.0 - . + +## 2023-06-15 01:10:40 +### Changes +Based on: +- OpenAPI Doc 1.3.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.47.4 (2.40.1) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v1.12.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.12.1 - . \ No newline at end of file diff --git a/docs/models/shared/createcompletionrequest.md b/docs/models/shared/createcompletionrequest.md index d29a9d8..b05347e 100755 --- a/docs/models/shared/createcompletionrequest.md +++ b/docs/models/shared/createcompletionrequest.md @@ -14,7 +14,7 @@ | `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. 
| | | `n` | *number* | :heavy_minus_sign: | How many completions to generate for each prompt.

**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
| 1 | | `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | -| `prompt` | *any* | :heavy_minus_sign: | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.

Note that <\|endoftext\|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
| | +| `prompt` | *any* | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.

Note that <\|endoftext\|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
| | | `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
| | | `stream` | *boolean* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
| | | `suffix` | *string* | :heavy_minus_sign: | The suffix that comes after a completion of inserted text. | test. | diff --git a/gen.yaml b/gen.yaml index b144372..762de99 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,8 +1,8 @@ configVersion: 1.0.0 management: - docChecksum: e3499fc1d954655713f996bf9459a51c + docChecksum: f3e31350bed5fa1756e115770b429911 docVersion: 1.3.0 - speakeasyVersion: 1.47.3 + speakeasyVersion: 1.47.4 generationVersion: 2.40.1 generation: sdkClassName: gpt @@ -10,7 +10,7 @@ generation: singleTagPerOp: false telemetryEnabled: false typescript: - version: 1.12.0 + version: 1.12.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index af28ea9..9215f5d 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "1.12.0", + "version": "1.12.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "1.12.0", + "version": "1.12.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 1bb2bc5..e29c7dc 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "1.12.0", + "version": "1.12.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts index 07e939c..528f8af 100755 --- a/src/sdk/models/shared/createcompletionrequest.ts +++ b/src/sdk/models/shared/createcompletionrequest.ts @@ -134,7 +134,7 @@ export class CreateCompletionRequest extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "prompt" }) - prompt?: any; + prompt: any; /** * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 6db3e4b..1260013 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,7 +38,7 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "1.3.0"; - sdkVersion = "1.12.0"; + sdkVersion = "1.12.1"; genVersion = "2.40.1"; public constructor(init?: Partial) { From a9e1b2e2b9419b4940293d8940327456c5262689 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 16 Jun 2023 01:11:49 +0000 Subject: [PATCH 07/66] ci: regenerated with OpenAPI Doc 1.3.0, Speakeay CLI 1.48.0 --- RELEASES.md | 10 +++++++++- docs/models/shared/createcompletionresponsechoices.md | 8 ++++---- files.gen | 1 - gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/models/shared/createcompletionresponse.ts | 8 ++++---- src/sdk/sdk.ts | 4 ++-- 8 files changed, 26 insertions(+), 19 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 9030251..9b5ec05 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -196,4 +196,12 @@ Based on: - OpenAPI Doc 1.3.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.47.4 (2.40.1) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v1.12.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.12.1 - . \ No newline at end of file +- [NPM v1.12.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.12.1 - . + +## 2023-06-16 01:11:27 +### Changes +Based on: +- OpenAPI Doc 1.3.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.48.0 (2.41.1) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v1.13.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.13.0 - . 
\ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoices.md b/docs/models/shared/createcompletionresponsechoices.md index 2bddd11..2a292fb 100755 --- a/docs/models/shared/createcompletionresponsechoices.md +++ b/docs/models/shared/createcompletionresponsechoices.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | -| `finishReason` | *string* | :heavy_minus_sign: | N/A | -| `index` | *number* | :heavy_minus_sign: | N/A | -| `logprobs` | [CreateCompletionResponseChoicesLogprobs](../../models/shared/createcompletionresponsechoiceslogprobs.md) | :heavy_minus_sign: | N/A | -| `text` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `finishReason` | *string* | :heavy_check_mark: | N/A | +| `index` | *number* | :heavy_check_mark: | N/A | +| `logprobs` | [CreateCompletionResponseChoicesLogprobs](../../models/shared/createcompletionresponsechoiceslogprobs.md) | :heavy_check_mark: | N/A | +| `text` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/files.gen b/files.gen index fe5f033..577f1d4 100755 --- a/files.gen +++ b/files.gen @@ -1,7 +1,6 @@ src/sdk/openai.ts src/sdk/sdk.ts .eslintrc.yml -.gitignore jest.config.js package-lock.json package.json diff --git a/gen.yaml b/gen.yaml index 762de99..1ca5ba8 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,16 +1,16 @@ configVersion: 1.0.0 management: - docChecksum: f3e31350bed5fa1756e115770b429911 + docChecksum: 8da487e08e51d83b6eb9534383bc3242 docVersion: 1.3.0 - speakeasyVersion: 1.47.4 - generationVersion: 2.40.1 
+ speakeasyVersion: 1.48.0 + generationVersion: 2.41.1 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 1.12.1 + version: 1.13.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 9215f5d..d0ae49e 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "1.12.1", + "version": "1.13.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "1.12.1", + "version": "1.13.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index e29c7dc..72b281a 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "1.12.1", + "version": "1.13.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/createcompletionresponse.ts b/src/sdk/models/shared/createcompletionresponse.ts index bb6f68c..dc3437b 100755 --- a/src/sdk/models/shared/createcompletionresponse.ts +++ b/src/sdk/models/shared/createcompletionresponse.ts @@ -29,20 +29,20 @@ export class CreateCompletionResponseChoicesLogprobs extends SpeakeasyBase { export class CreateCompletionResponseChoices extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "finish_reason" }) - finishReason?: string; + finishReason: string; @SpeakeasyMetadata() @Expose({ name: "index" }) - index?: number; + index: number; @SpeakeasyMetadata() @Expose({ name: "logprobs" }) @Type(() => CreateCompletionResponseChoicesLogprobs) - logprobs?: CreateCompletionResponseChoicesLogprobs; + logprobs: CreateCompletionResponseChoicesLogprobs; @SpeakeasyMetadata() @Expose({ name: "text" }) - text?: string; + text: string; } export class CreateCompletionResponseUsage extends SpeakeasyBase { diff --git a/src/sdk/sdk.ts 
b/src/sdk/sdk.ts index 1260013..a2c99b9 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "1.3.0"; - sdkVersion = "1.12.1"; - genVersion = "2.40.1"; + sdkVersion = "1.13.0"; + genVersion = "2.41.1"; public constructor(init?: Partial) { Object.assign(this, init); From e43a8c2870bf5fbf97e6a3f1d360aaac26b75164 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 17 Jun 2023 01:07:20 +0000 Subject: [PATCH 08/66] ci: regenerated with OpenAPI Doc 1.3.0, Speakeay CLI 1.48.0 --- RELEASES.md | 10 +++++++++- .../shared/createchatcompletionresponsechoices.md | 10 +++++----- ...reatechatcompletionresponsechoicesfinishreason.md | 10 ++++++++++ .../models/shared/createcompletionresponsechoices.md | 12 ++++++------ .../createcompletionresponsechoicesfinishreason.md | 9 +++++++++ docs/models/shared/createeditresponsechoices.md | 12 ++++++------ .../shared/createeditresponsechoicesfinishreason.md | 9 +++++++++ docs/sdks/openai/README.md | 3 +++ files.gen | 3 +++ gen.yaml | 4 ++-- package-lock.json | 4 ++-- package.json | 2 +- .../models/shared/createchatcompletionresponse.ts | 8 +++++++- src/sdk/models/shared/createcompletionresponse.ts | 7 ++++++- src/sdk/models/shared/createeditresponse.ts | 7 ++++++- src/sdk/sdk.ts | 2 +- 16 files changed, 85 insertions(+), 27 deletions(-) create mode 100755 docs/models/shared/createchatcompletionresponsechoicesfinishreason.md create mode 100755 docs/models/shared/createcompletionresponsechoicesfinishreason.md create mode 100755 docs/models/shared/createeditresponsechoicesfinishreason.md diff --git a/RELEASES.md b/RELEASES.md index 9b5ec05..6bf2faa 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -204,4 +204,12 @@ Based on: - OpenAPI Doc 1.3.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.48.0 (2.41.1) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v1.13.0] 
https://www.npmjs.com/package/@speakeasy-api/openai/v/1.13.0 - . \ No newline at end of file +- [NPM v1.13.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.13.0 - . + +## 2023-06-17 01:06:55 +### Changes +Based on: +- OpenAPI Doc 1.3.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.48.0 (2.41.1) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v1.13.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.13.1 - . \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionresponsechoices.md b/docs/models/shared/createchatcompletionresponsechoices.md index 96a3ded..0de5ed7 100755 --- a/docs/models/shared/createchatcompletionresponsechoices.md +++ b/docs/models/shared/createchatcompletionresponsechoices.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | -| `finishReason` | *string* | :heavy_minus_sign: | N/A | -| `index` | *number* | :heavy_minus_sign: | N/A | -| `message` | [ChatCompletionResponseMessage](../../models/shared/chatcompletionresponsemessage.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------- | +| `finishReason` | [CreateChatCompletionResponseChoicesFinishReason](../../models/shared/createchatcompletionresponsechoicesfinishreason.md) | :heavy_minus_sign: | N/A | +| `index` | *number* | :heavy_minus_sign: | N/A | +| `message` | [ChatCompletionResponseMessage](../../models/shared/chatcompletionresponsemessage.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md b/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md new file mode 100755 index 0000000..74f5f7d --- /dev/null +++ b/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md @@ -0,0 +1,10 @@ +# CreateChatCompletionResponseChoicesFinishReason + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `Stop` | stop | +| `Length` | length | +| `FunctionCall` | function_call | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoices.md b/docs/models/shared/createcompletionresponsechoices.md index 2a292fb..41858e2 100755 --- a/docs/models/shared/createcompletionresponsechoices.md +++ b/docs/models/shared/createcompletionresponsechoices.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | -| `finishReason` | *string* | :heavy_check_mark: | N/A | -| `index` | *number* | :heavy_check_mark: | N/A | -| `logprobs` | 
[CreateCompletionResponseChoicesLogprobs](../../models/shared/createcompletionresponsechoiceslogprobs.md) | :heavy_check_mark: | N/A | -| `text` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | +| `finishReason` | [CreateCompletionResponseChoicesFinishReason](../../models/shared/createcompletionresponsechoicesfinishreason.md) | :heavy_check_mark: | N/A | +| `index` | *number* | :heavy_check_mark: | N/A | +| `logprobs` | [CreateCompletionResponseChoicesLogprobs](../../models/shared/createcompletionresponsechoiceslogprobs.md) | :heavy_check_mark: | N/A | +| `text` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoicesfinishreason.md b/docs/models/shared/createcompletionresponsechoicesfinishreason.md new file mode 100755 index 0000000..18d1a23 --- /dev/null +++ b/docs/models/shared/createcompletionresponsechoicesfinishreason.md @@ -0,0 +1,9 @@ +# CreateCompletionResponseChoicesFinishReason + + +## Values + +| Name | Value | +| -------- | -------- | +| `Stop` | stop | +| `Length` | length | \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoices.md b/docs/models/shared/createeditresponsechoices.md index b59fe1d..358667d 100755 --- a/docs/models/shared/createeditresponsechoices.md +++ b/docs/models/shared/createeditresponsechoices.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| 
--------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | -| `finishReason` | *string* | :heavy_minus_sign: | N/A | -| `index` | *number* | :heavy_minus_sign: | N/A | -| `logprobs` | [CreateEditResponseChoicesLogprobs](../../models/shared/createeditresponsechoiceslogprobs.md) | :heavy_minus_sign: | N/A | -| `text` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | +| `finishReason` | [CreateEditResponseChoicesFinishReason](../../models/shared/createeditresponsechoicesfinishreason.md) | :heavy_minus_sign: | N/A | +| `index` | *number* | :heavy_minus_sign: | N/A | +| `logprobs` | [CreateEditResponseChoicesLogprobs](../../models/shared/createeditresponsechoiceslogprobs.md) | :heavy_minus_sign: | N/A | +| `text` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoicesfinishreason.md b/docs/models/shared/createeditresponsechoicesfinishreason.md new file mode 100755 index 0000000..74b87d8 --- /dev/null +++ b/docs/models/shared/createeditresponsechoicesfinishreason.md @@ -0,0 +1,9 @@ +# CreateEditResponseChoicesFinishReason + + +## Values + +| Name | Value | +| -------- | -------- | +| `Stop` | stop | +| `Length` | 
length | \ No newline at end of file diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index e5c5bea..bb3c2f6 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -195,6 +195,7 @@ import { ChatCompletionRequestMessageRole, ChatCompletionResponseMessageRole, CreateChatCompletionRequestFunctionCall1, + CreateChatCompletionResponseChoicesFinishReason, } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); @@ -388,6 +389,7 @@ Creates a completion for the provided prompt and parameters. ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { CreateCompletionResponseChoicesFinishReason } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); @@ -444,6 +446,7 @@ Creates a new edit for the provided input, instruction, and parameters. ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { CreateEditResponseChoicesFinishReason } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); diff --git a/files.gen b/files.gen index 577f1d4..9956f3b 100755 --- a/files.gen +++ b/files.gen @@ -138,6 +138,7 @@ docs/models/shared/finetuneevent.md docs/models/shared/createanswerresponseselecteddocuments.md docs/models/shared/createanswerresponse.md docs/models/shared/createanswerrequest.md +docs/models/shared/createchatcompletionresponsechoicesfinishreason.md docs/models/shared/createchatcompletionresponsechoices.md docs/models/shared/createchatcompletionresponseusage.md docs/models/shared/createchatcompletionresponse.md @@ -155,6 +156,7 @@ docs/models/shared/chatcompletionfunctions.md docs/models/shared/createclassificationresponseselectedexamples.md docs/models/shared/createclassificationresponse.md docs/models/shared/createclassificationrequest.md 
+docs/models/shared/createcompletionresponsechoicesfinishreason.md docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md docs/models/shared/createcompletionresponsechoiceslogprobs.md docs/models/shared/createcompletionresponsechoices.md @@ -162,6 +164,7 @@ docs/models/shared/createcompletionresponseusage.md docs/models/shared/createcompletionresponse.md docs/models/shared/createcompletionrequestlogitbias.md docs/models/shared/createcompletionrequest.md +docs/models/shared/createeditresponsechoicesfinishreason.md docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md docs/models/shared/createeditresponsechoiceslogprobs.md docs/models/shared/createeditresponsechoices.md diff --git a/gen.yaml b/gen.yaml index 1ca5ba8..13347c6 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,6 +1,6 @@ configVersion: 1.0.0 management: - docChecksum: 8da487e08e51d83b6eb9534383bc3242 + docChecksum: d0ce7d708c8bcb95aae605d27c99cb19 docVersion: 1.3.0 speakeasyVersion: 1.48.0 generationVersion: 2.41.1 @@ -10,7 +10,7 @@ generation: singleTagPerOp: false telemetryEnabled: false typescript: - version: 1.13.0 + version: 1.13.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index d0ae49e..035449f 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "1.13.0", + "version": "1.13.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "1.13.0", + "version": "1.13.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 72b281a..6d5c6e8 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "1.13.0", + "version": "1.13.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/createchatcompletionresponse.ts 
b/src/sdk/models/shared/createchatcompletionresponse.ts index 5e3fae0..27dffc7 100755 --- a/src/sdk/models/shared/createchatcompletionresponse.ts +++ b/src/sdk/models/shared/createchatcompletionresponse.ts @@ -6,10 +6,16 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { ChatCompletionResponseMessage } from "./chatcompletionresponsemessage"; import { Expose, Type } from "class-transformer"; +export enum CreateChatCompletionResponseChoicesFinishReason { + Stop = "stop", + Length = "length", + FunctionCall = "function_call", +} + export class CreateChatCompletionResponseChoices extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "finish_reason" }) - finishReason?: string; + finishReason?: CreateChatCompletionResponseChoicesFinishReason; @SpeakeasyMetadata() @Expose({ name: "index" }) diff --git a/src/sdk/models/shared/createcompletionresponse.ts b/src/sdk/models/shared/createcompletionresponse.ts index dc3437b..5e0f34d 100755 --- a/src/sdk/models/shared/createcompletionresponse.ts +++ b/src/sdk/models/shared/createcompletionresponse.ts @@ -5,6 +5,11 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; +export enum CreateCompletionResponseChoicesFinishReason { + Stop = "stop", + Length = "length", +} + export class CreateCompletionResponseChoicesLogprobsTopLogprobs extends SpeakeasyBase {} export class CreateCompletionResponseChoicesLogprobs extends SpeakeasyBase { @@ -29,7 +34,7 @@ export class CreateCompletionResponseChoicesLogprobs extends SpeakeasyBase { export class CreateCompletionResponseChoices extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "finish_reason" }) - finishReason: string; + finishReason: CreateCompletionResponseChoicesFinishReason; @SpeakeasyMetadata() @Expose({ name: "index" }) diff --git a/src/sdk/models/shared/createeditresponse.ts b/src/sdk/models/shared/createeditresponse.ts index bcd39db..6a52f8e 100755 --- 
a/src/sdk/models/shared/createeditresponse.ts +++ b/src/sdk/models/shared/createeditresponse.ts @@ -5,6 +5,11 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; +export enum CreateEditResponseChoicesFinishReason { + Stop = "stop", + Length = "length", +} + export class CreateEditResponseChoicesLogprobsTopLogprobs extends SpeakeasyBase {} export class CreateEditResponseChoicesLogprobs extends SpeakeasyBase { @@ -29,7 +34,7 @@ export class CreateEditResponseChoicesLogprobs extends SpeakeasyBase { export class CreateEditResponseChoices extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "finish_reason" }) - finishReason?: string; + finishReason?: CreateEditResponseChoicesFinishReason; @SpeakeasyMetadata() @Expose({ name: "index" }) diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index a2c99b9..785e5e7 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,7 +38,7 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "1.3.0"; - sdkVersion = "1.13.0"; + sdkVersion = "1.13.1"; genVersion = "2.41.1"; public constructor(init?: Partial) { From e9c8676dc7207802265b4cc009151c0e4470de11 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 20 Jun 2023 01:09:10 +0000 Subject: [PATCH 09/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.49.0 --- README.md | 24 +- RELEASES.md | 10 +- .../models/operations/createanswerresponse.md | 11 - .../createclassificationresponse.md | 11 - docs/models/operations/createsearchrequest.md | 9 - .../models/operations/createsearchresponse.md | 11 - docs/models/operations/listenginesresponse.md | 11 - .../operations/retrieveenginerequest.md | 8 - .../operations/retrieveengineresponse.md | 11 - docs/models/shared/createanswerrequest.md | 25 - docs/models/shared/createanswerresponse.md | 15 - .../createanswerresponseselecteddocuments.md | 9 - .../shared/createchatcompletionrequest.md | 2 +- 
.../createchatcompletionrequestmodel2.md | 17 + .../shared/createclassificationrequest.md | 21 - .../shared/createclassificationresponse.md | 15 - ...eclassificationresponseselectedexamples.md | 10 - docs/models/shared/createcompletionrequest.md | 2 +- .../shared/createcompletionrequestmodel2.md | 17 + docs/models/shared/createeditrequest.md | 2 +- docs/models/shared/createeditrequestmodel2.md | 11 + docs/models/shared/createembeddingrequest.md | 2 +- .../shared/createembeddingrequestmodel2.md | 10 + docs/models/shared/createfinetunerequest.md | 2 +- .../shared/createfinetunerequestmodel2.md | 17 + docs/models/shared/createmoderationrequest.md | 8 +- .../shared/createmoderationrequestmodel2.md | 14 + docs/models/shared/createsearchrequest.md | 13 - docs/models/shared/createsearchresponse.md | 12 - .../models/shared/createsearchresponsedata.md | 10 - .../shared/createtranscriptionrequest.md | 2 +- .../createtranscriptionrequestmodel2.md | 11 + .../models/shared/createtranslationrequest.md | 2 +- .../shared/createtranslationrequestmodel2.md | 11 + docs/models/shared/engine.md | 13 - docs/models/shared/listenginesresponse.md | 11 - docs/sdks/openai/README.md | 514 ++++-------------- files.gen | 39 +- gen.yaml | 10 +- package-lock.json | 4 +- package.json | 2 +- src/sdk/models/operations/createanswer.ts | 24 - .../models/operations/createclassification.ts | 24 - src/sdk/models/operations/createsearch.ts | 35 -- src/sdk/models/operations/index.ts | 5 - src/sdk/models/operations/listengines.ts | 24 - src/sdk/models/operations/retrieveengine.ts | 35 -- src/sdk/models/shared/createanswerrequest.ts | 142 ----- src/sdk/models/shared/createanswerresponse.ts | 46 -- .../shared/createchatcompletionrequest.ts | 16 +- .../shared/createclassificationrequest.ts | 96 ---- .../shared/createclassificationresponse.ts | 50 -- .../models/shared/createcompletionrequest.ts | 21 +- src/sdk/models/shared/createeditrequest.ts | 10 +- .../models/shared/createembeddingrequest.ts | 10 + 
.../models/shared/createfinetunerequest.ts | 18 +- .../models/shared/createmoderationrequest.ts | 15 +- src/sdk/models/shared/createsearchrequest.ts | 69 --- src/sdk/models/shared/createsearchresponse.ts | 38 -- .../shared/createtranscriptionrequest.ts | 14 +- .../models/shared/createtranslationrequest.ts | 14 +- src/sdk/models/shared/engine.ts | 27 - src/sdk/models/shared/index.ts | 8 - src/sdk/models/shared/listenginesresponse.ts | 21 - src/sdk/openai.ts | 363 +------------ src/sdk/sdk.ts | 6 +- 66 files changed, 361 insertions(+), 1729 deletions(-) delete mode 100755 docs/models/operations/createanswerresponse.md delete mode 100755 docs/models/operations/createclassificationresponse.md delete mode 100755 docs/models/operations/createsearchrequest.md delete mode 100755 docs/models/operations/createsearchresponse.md delete mode 100755 docs/models/operations/listenginesresponse.md delete mode 100755 docs/models/operations/retrieveenginerequest.md delete mode 100755 docs/models/operations/retrieveengineresponse.md delete mode 100755 docs/models/shared/createanswerrequest.md delete mode 100755 docs/models/shared/createanswerresponse.md delete mode 100755 docs/models/shared/createanswerresponseselecteddocuments.md create mode 100755 docs/models/shared/createchatcompletionrequestmodel2.md delete mode 100755 docs/models/shared/createclassificationrequest.md delete mode 100755 docs/models/shared/createclassificationresponse.md delete mode 100755 docs/models/shared/createclassificationresponseselectedexamples.md create mode 100755 docs/models/shared/createcompletionrequestmodel2.md create mode 100755 docs/models/shared/createeditrequestmodel2.md create mode 100755 docs/models/shared/createembeddingrequestmodel2.md create mode 100755 docs/models/shared/createfinetunerequestmodel2.md create mode 100755 docs/models/shared/createmoderationrequestmodel2.md delete mode 100755 docs/models/shared/createsearchrequest.md delete mode 100755 docs/models/shared/createsearchresponse.md 
delete mode 100755 docs/models/shared/createsearchresponsedata.md create mode 100755 docs/models/shared/createtranscriptionrequestmodel2.md create mode 100755 docs/models/shared/createtranslationrequestmodel2.md delete mode 100755 docs/models/shared/engine.md delete mode 100755 docs/models/shared/listenginesresponse.md delete mode 100755 src/sdk/models/operations/createanswer.ts delete mode 100755 src/sdk/models/operations/createclassification.ts delete mode 100755 src/sdk/models/operations/createsearch.ts delete mode 100755 src/sdk/models/operations/listengines.ts delete mode 100755 src/sdk/models/operations/retrieveengine.ts delete mode 100755 src/sdk/models/shared/createanswerrequest.ts delete mode 100755 src/sdk/models/shared/createanswerresponse.ts delete mode 100755 src/sdk/models/shared/createclassificationrequest.ts delete mode 100755 src/sdk/models/shared/createclassificationresponse.ts delete mode 100755 src/sdk/models/shared/createsearchrequest.ts delete mode 100755 src/sdk/models/shared/createsearchresponse.ts delete mode 100755 src/sdk/models/shared/engine.ts delete mode 100755 src/sdk/models/shared/listenginesresponse.ts diff --git a/README.md b/README.md index 02728f6..9e9f3be 100755 --- a/README.md +++ b/README.md @@ -63,21 +63,7 @@ sdk.openAI.cancelFineTune({ * [cancelFineTune](docs/sdks/openai/README.md#cancelfinetune) - Immediately cancel a fine-tune job. -* [~~createAnswer~~](docs/sdks/openai/README.md#createanswer) - Answers the specified question using the provided documents and examples. - -The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). - :warning: **Deprecated** * [createChatCompletion](docs/sdks/openai/README.md#createchatcompletion) - Creates a model response for the given chat conversation. 
-* [~~createClassification~~](docs/sdks/openai/README.md#createclassification) - Classifies the specified `query` using provided examples. - -The endpoint first [searches](/docs/api-reference/searches) over the labeled examples -to select the ones most relevant for the particular query. Then, the relevant examples -are combined with the query to construct a prompt to produce the final label via the -[completions](/docs/api-reference/completions) endpoint. - -Labeled examples can be provided via an uploaded `file`, or explicitly listed in the -request using the `examples` parameter for quick tests and small scale use cases. - :warning: **Deprecated** * [createCompletion](docs/sdks/openai/README.md#createcompletion) - Creates a completion for the provided prompt and parameters. * [createEdit](docs/sdks/openai/README.md#createedit) - Creates a new edit for the provided input, instruction, and parameters. * [createEmbedding](docs/sdks/openai/README.md#createembedding) - Creates an embedding vector representing the input text. @@ -93,25 +79,17 @@ Response includes details of the enqueued job including job status and the name * [createImageEdit](docs/sdks/openai/README.md#createimageedit) - Creates an edited or extended image given an original image and a prompt. * [createImageVariation](docs/sdks/openai/README.md#createimagevariation) - Creates a variation of a given image. * [createModeration](docs/sdks/openai/README.md#createmoderation) - Classifies if text violates OpenAI's Content Policy -* [~~createSearch~~](docs/sdks/openai/README.md#createsearch) - The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. - -To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. 
When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. - -The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. - :warning: **Deprecated** * [createTranscription](docs/sdks/openai/README.md#createtranscription) - Transcribes audio into the input language. -* [createTranslation](docs/sdks/openai/README.md#createtranslation) - Translates audio into into English. +* [createTranslation](docs/sdks/openai/README.md#createtranslation) - Translates audio into English. * [deleteFile](docs/sdks/openai/README.md#deletefile) - Delete a file. * [deleteModel](docs/sdks/openai/README.md#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization. * [downloadFile](docs/sdks/openai/README.md#downloadfile) - Returns the contents of the specified file -* [~~listEngines~~](docs/sdks/openai/README.md#listengines) - Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. :warning: **Deprecated** * [listFiles](docs/sdks/openai/README.md#listfiles) - Returns a list of files that belong to the user's organization. * [listFineTuneEvents](docs/sdks/openai/README.md#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. * [listFineTunes](docs/sdks/openai/README.md#listfinetunes) - List your organization's fine-tuning jobs * [listModels](docs/sdks/openai/README.md#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. -* [~~retrieveEngine~~](docs/sdks/openai/README.md#retrieveengine) - Retrieves a model instance, providing basic information about it such as the owner and availability. 
:warning: **Deprecated** * [retrieveFile](docs/sdks/openai/README.md#retrievefile) - Returns information about a specific file. * [retrieveFineTune](docs/sdks/openai/README.md#retrievefinetune) - Gets info about the fine-tune job. diff --git a/RELEASES.md b/RELEASES.md index 6bf2faa..0fcf71a 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -212,4 +212,12 @@ Based on: - OpenAPI Doc 1.3.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.48.0 (2.41.1) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v1.13.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.13.1 - . \ No newline at end of file +- [NPM v1.13.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/1.13.1 - . + +## 2023-06-20 01:08:50 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.49.0 (2.41.4) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.0.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.0.0 - . 
\ No newline at end of file diff --git a/docs/models/operations/createanswerresponse.md b/docs/models/operations/createanswerresponse.md deleted file mode 100755 index 54cf7fa..0000000 --- a/docs/models/operations/createanswerresponse.md +++ /dev/null @@ -1,11 +0,0 @@ -# CreateAnswerResponse - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `createAnswerResponse` | [shared.CreateAnswerResponse](../../models/shared/createanswerresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createclassificationresponse.md b/docs/models/operations/createclassificationresponse.md deleted file mode 100755 index 7f08699..0000000 --- a/docs/models/operations/createclassificationresponse.md +++ /dev/null @@ -1,11 +0,0 @@ -# CreateClassificationResponse - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `createClassificationResponse` | [shared.CreateClassificationResponse](../../models/shared/createclassificationresponse.md) | :heavy_minus_sign: | OK | -| 
`statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createsearchrequest.md b/docs/models/operations/createsearchrequest.md deleted file mode 100755 index a7ad86a..0000000 --- a/docs/models/operations/createsearchrequest.md +++ /dev/null @@ -1,9 +0,0 @@ -# CreateSearchRequest - - -## Fields - -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -| `createSearchRequest` | [shared.CreateSearchRequest](../../models/shared/createsearchrequest.md) | :heavy_check_mark: | N/A | | -| `engineId` | *string* | :heavy_check_mark: | The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`. 
| davinci | \ No newline at end of file diff --git a/docs/models/operations/createsearchresponse.md b/docs/models/operations/createsearchresponse.md deleted file mode 100755 index 5306a56..0000000 --- a/docs/models/operations/createsearchresponse.md +++ /dev/null @@ -1,11 +0,0 @@ -# CreateSearchResponse - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `createSearchResponse` | [shared.CreateSearchResponse](../../models/shared/createsearchresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/listenginesresponse.md b/docs/models/operations/listenginesresponse.md deleted file mode 100755 index c81d050..0000000 --- a/docs/models/operations/listenginesresponse.md +++ /dev/null @@ -1,11 +0,0 @@ -# ListEnginesResponse - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `listEnginesResponse` | [shared.ListEnginesResponse](../../models/shared/listenginesresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | 
:heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/retrieveenginerequest.md b/docs/models/operations/retrieveenginerequest.md deleted file mode 100755 index 1f9e3a3..0000000 --- a/docs/models/operations/retrieveenginerequest.md +++ /dev/null @@ -1,8 +0,0 @@ -# RetrieveEngineRequest - - -## Fields - -| Field | Type | Required | Description | Example | -| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | -| `engineId` | *string* | :heavy_check_mark: | The ID of the engine to use for this request
| davinci | \ No newline at end of file diff --git a/docs/models/operations/retrieveengineresponse.md b/docs/models/operations/retrieveengineresponse.md deleted file mode 100755 index a26b597..0000000 --- a/docs/models/operations/retrieveengineresponse.md +++ /dev/null @@ -1,11 +0,0 @@ -# RetrieveEngineResponse - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `engine` | [shared.Engine](../../models/shared/engine.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createanswerrequest.md b/docs/models/shared/createanswerrequest.md deleted file mode 100755 index f8588d6..0000000 --- a/docs/models/shared/createanswerrequest.md +++ /dev/null @@ -1,25 +0,0 @@ -# CreateAnswerRequest - - -## Fields - -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `documents` | *string*[] | :heavy_minus_sign: | List of documents from which the answer for the input `question` should be derived. 
If this is an empty list, the question will be answered based on the question-answer examples.

You should specify either `documents` or a `file`, but not both.
| | -| `examples` | *string*[][] | :heavy_check_mark: | List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. | | -| `examplesContext` | *string* | :heavy_check_mark: | A text snippet containing the contextual information used to generate the answers for the `examples` you provide. | Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. border. | -| `expand` | *any*[] | :heavy_minus_sign: | If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. | | -| `file` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose.

You should specify either `documents` or a `file`, but not both.
| | -| `logitBias` | *any* | :heavy_minus_sign: | N/A | | -| `logprobs` | *number* | :heavy_minus_sign: | Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5.

When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
| | -| `maxRerank` | *number* | :heavy_minus_sign: | The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. | | -| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of tokens allowed for the generated answer | | -| `model` | *string* | :heavy_check_mark: | ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. | | -| `n` | *number* | :heavy_minus_sign: | How many answers to generate for each question. | | -| `question` | *string* | :heavy_check_mark: | Question to get answered. | What is the capital of Japan? | -| `returnMetadata` | *any* | :heavy_minus_sign: | N/A | | -| `returnPrompt` | *boolean* | :heavy_minus_sign: | If set to `true`, the returned JSON will include a "prompt" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. | | -| `searchModel` | *string* | :heavy_minus_sign: | ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. | | -| `stop` | *any* | :heavy_minus_sign: | completions_stop_description | | -| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
| | -| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/shared/createanswerresponse.md b/docs/models/shared/createanswerresponse.md deleted file mode 100755 index b1536f9..0000000 --- a/docs/models/shared/createanswerresponse.md +++ /dev/null @@ -1,15 +0,0 @@ -# CreateAnswerResponse - -OK - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | -| `answers` | *string*[] | :heavy_minus_sign: | N/A | -| `completion` | *string* | :heavy_minus_sign: | N/A | -| `model` | *string* | :heavy_minus_sign: | N/A | -| `object` | *string* | :heavy_minus_sign: | N/A | -| `searchModel` | *string* | :heavy_minus_sign: | N/A | -| `selectedDocuments` | [CreateAnswerResponseSelectedDocuments](../../models/shared/createanswerresponseselecteddocuments.md)[] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createanswerresponseselecteddocuments.md b/docs/models/shared/createanswerresponseselecteddocuments.md deleted file mode 100755 index 529d040..0000000 --- a/docs/models/shared/createanswerresponseselecteddocuments.md +++ /dev/null @@ -1,9 +0,0 @@ -# CreateAnswerResponseSelectedDocuments - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `document` | *number* | :heavy_minus_sign: | N/A | -| `text` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequest.md b/docs/models/shared/createchatcompletionrequest.md 
index a608083..f6ac246 100755 --- a/docs/models/shared/createchatcompletionrequest.md +++ b/docs/models/shared/createchatcompletionrequest.md @@ -11,7 +11,7 @@ | `logitBias` | [CreateChatCompletionRequestLogitBias](../../models/shared/createchatcompletionrequestlogitbias.md) | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
| | | `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the chat completion.

The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| | | `messages` | [ChatCompletionRequestMessage](../../models/shared/chatcompletionrequestmessage.md)[] | :heavy_check_mark: | A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | | -| `model` | *string* | :heavy_check_mark: | ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. | | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. | | | `n` | *number* | :heavy_minus_sign: | How many chat completion choices to generate for each input message. | 1 | | `presencePenalty` | *number* | :heavy_minus_sign: | completions_presence_penalty_description | | | `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens.
| | diff --git a/docs/models/shared/createchatcompletionrequestmodel2.md b/docs/models/shared/createchatcompletionrequestmodel2.md new file mode 100755 index 0000000..ba2001a --- /dev/null +++ b/docs/models/shared/createchatcompletionrequestmodel2.md @@ -0,0 +1,17 @@ +# CreateChatCompletionRequestModel2 + +ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `Gpt4` | gpt-4 | +| `Gpt40613` | gpt-4-0613 | +| `Gpt432k` | gpt-4-32k | +| `Gpt432k0613` | gpt-4-32k-0613 | +| `Gpt35Turbo` | gpt-3.5-turbo | +| `Gpt35Turbo16k` | gpt-3.5-turbo-16k | +| `Gpt35Turbo0613` | gpt-3.5-turbo-0613 | +| `Gpt35Turbo16k0613` | gpt-3.5-turbo-16k-0613 | \ No newline at end of file diff --git a/docs/models/shared/createclassificationrequest.md b/docs/models/shared/createclassificationrequest.md deleted file mode 100755 index ae408da..0000000 --- a/docs/models/shared/createclassificationrequest.md +++ /dev/null @@ -1,21 +0,0 @@ -# CreateClassificationRequest - - -## Fields - -| Field | Type | Required | Description | Example | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `examples` | *string*[][] | :heavy_minus_sign: | A list of examples with labels, in the following format:

`[["The movie is so interesting.", "Positive"], ["It is quite boring.", "Negative"], ...]`

All the label strings will be normalized to be capitalized.

You should specify either `examples` or `file`, but not both.
| | -| `expand` | *any* | :heavy_minus_sign: | N/A | | -| `file` | *string* | :heavy_minus_sign: | The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose.

You should specify either `examples` or `file`, but not both.
| | -| `labels` | *string*[] | :heavy_minus_sign: | The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized. | | -| `logitBias` | *any* | :heavy_minus_sign: | N/A | | -| `logprobs` | *any* | :heavy_minus_sign: | N/A | | -| `maxExamples` | *number* | :heavy_minus_sign: | The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. | | -| `model` | *any* | :heavy_check_mark: | N/A | | -| `query` | *string* | :heavy_check_mark: | Query to be classified. | The plot is not very attractive. | -| `returnMetadata` | *any* | :heavy_minus_sign: | N/A | | -| `returnPrompt` | *any* | :heavy_minus_sign: | N/A | | -| `searchModel` | *any* | :heavy_minus_sign: | N/A | | -| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
| 0 | -| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/shared/createclassificationresponse.md b/docs/models/shared/createclassificationresponse.md deleted file mode 100755 index 409671d..0000000 --- a/docs/models/shared/createclassificationresponse.md +++ /dev/null @@ -1,15 +0,0 @@ -# CreateClassificationResponse - -OK - - -## Fields - -| Field | Type | Required | Description | -| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | -| `completion` | *string* | :heavy_minus_sign: | N/A | -| `label` | *string* | :heavy_minus_sign: | N/A | -| `model` | *string* | :heavy_minus_sign: | N/A | -| `object` | *string* | :heavy_minus_sign: | N/A | -| `searchModel` | *string* | :heavy_minus_sign: | N/A | -| `selectedExamples` | [CreateClassificationResponseSelectedExamples](../../models/shared/createclassificationresponseselectedexamples.md)[] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createclassificationresponseselectedexamples.md b/docs/models/shared/createclassificationresponseselectedexamples.md deleted file mode 100755 index acf8f7d..0000000 --- a/docs/models/shared/createclassificationresponseselectedexamples.md +++ /dev/null @@ -1,10 +0,0 @@ -# CreateClassificationResponseSelectedExamples - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `document` | *number* | :heavy_minus_sign: | N/A | -| `label` | *string* | :heavy_minus_sign: | N/A | -| `text` | 
*string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createcompletionrequest.md b/docs/models/shared/createcompletionrequest.md index b05347e..d33ed0f 100755 --- a/docs/models/shared/createcompletionrequest.md +++ b/docs/models/shared/createcompletionrequest.md @@ -11,7 +11,7 @@ | `logitBias` | [CreateCompletionRequestLogitBias](../../models/shared/createcompletionrequestlogitbias.md) | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.

As an example, you can pass `{"50256": -100}` to prevent the <\|endoftext\|> token from being generated.
| | | `logprobs` | *number* | :heavy_minus_sign: | Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5.
| | | `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the completion.

The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| 16 | -| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. | | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| | | `n` | *number* | :heavy_minus_sign: | How many completions to generate for each prompt.

**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
| 1 | | `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | | `prompt` | *any* | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.

Note that <\|endoftext\|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
| | diff --git a/docs/models/shared/createcompletionrequestmodel2.md b/docs/models/shared/createcompletionrequestmodel2.md new file mode 100755 index 0000000..8f81c3e --- /dev/null +++ b/docs/models/shared/createcompletionrequestmodel2.md @@ -0,0 +1,17 @@ +# CreateCompletionRequestModel2 + +ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + + + +## Values + +| Name | Value | +| ---------------- | ---------------- | +| `TextDavinci003` | text-davinci-003 | +| `TextDavinci002` | text-davinci-002 | +| `TextDavinci001` | text-davinci-001 | +| `CodeDavinci002` | code-davinci-002 | +| `TextCurie001` | text-curie-001 | +| `TextBabbage001` | text-babbage-001 | +| `TextAda001` | text-ada-001 | \ No newline at end of file diff --git a/docs/models/shared/createeditrequest.md b/docs/models/shared/createeditrequest.md index 09ea8b9..a6a736d 100755 --- a/docs/models/shared/createeditrequest.md +++ b/docs/models/shared/createeditrequest.md @@ -7,7 +7,7 @@ | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | | `input` | *string* | :heavy_minus_sign: | The input text to use as a starting point for the edit. | What day of the wek is it? | | `instruction` | *string* | :heavy_check_mark: | The instruction that tells the model how to edit the prompt. | Fix the spelling mistakes. 
| -| `model` | *string* | :heavy_check_mark: | ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. | | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. | | | `n` | *number* | :heavy_minus_sign: | How many edits to generate for the input and instruction. | 1 | | `temperature` | *number* | :heavy_minus_sign: | completions_temperature_description | 1 | | `topP` | *number* | :heavy_minus_sign: | completions_top_p_description | 1 | \ No newline at end of file diff --git a/docs/models/shared/createeditrequestmodel2.md b/docs/models/shared/createeditrequestmodel2.md new file mode 100755 index 0000000..9234d1e --- /dev/null +++ b/docs/models/shared/createeditrequestmodel2.md @@ -0,0 +1,11 @@ +# CreateEditRequestModel2 + +ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. 
+ + +## Values + +| Name | Value | +| --------------------- | --------------------- | +| `TextDavinciEdit001` | text-davinci-edit-001 | +| `CodeDavinciEdit001` | code-davinci-edit-001 | \ No newline at end of file diff --git a/docs/models/shared/createembeddingrequest.md b/docs/models/shared/createembeddingrequest.md index 9ee2940..aacad57 100755 --- a/docs/models/shared/createembeddingrequest.md +++ b/docs/models/shared/createembeddingrequest.md @@ -6,5 +6,5 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `input` | *any* | :heavy_check_mark: | Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| -| `model` | *any* | :heavy_check_mark: | N/A | +| `model` | *any* | :heavy_check_mark: | model_description | | `user` | *any* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createembeddingrequestmodel2.md b/docs/models/shared/createembeddingrequestmodel2.md new file mode 100755 index 0000000..22ba5fa --- /dev/null +++ b/docs/models/shared/createembeddingrequestmodel2.md @@ -0,0 +1,10 @@ +# CreateEmbeddingRequestModel2 + +model_description + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `TextEmbeddingAda002` | text-embedding-ada-002 | \ No newline at end of file diff --git a/docs/models/shared/createfinetunerequest.md b/docs/models/shared/createfinetunerequest.md index ee7ab0c..aaa9031 100755 --- a/docs/models/shared/createfinetunerequest.md +++ b/docs/models/shared/createfinetunerequest.md @@ -11,7 +11,7 @@ | `classificationPositiveClass` | *string* | :heavy_minus_sign: | The positive class in binary classification.

This parameter is needed to generate precision, recall, and F1
metrics when doing binary classification.
| | | `computeClassificationMetrics` | *boolean* | :heavy_minus_sign: | If set, we calculate classification-specific metrics such as accuracy
and F-1 score using the validation set at the end of every epoch.
These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model).

In order to compute classification metrics, you must provide a
`validation_file`. Additionally, you must
specify `classification_n_classes` for multiclass classification or
`classification_positive_class` for binary classification.
| | | `learningRateMultiplier` | *number* | :heavy_minus_sign: | The learning rate multiplier to use for training.
The fine-tuning learning rate is the original learning rate used for
pretraining multiplied by this value.

By default, the learning rate multiplier is the 0.05, 0.1, or 0.2
depending on final `batch_size` (larger learning rates tend to
perform better with larger batch sizes). We recommend experimenting
with values in the range 0.02 to 0.2 to see what produces the best
results.
| | -| `model` | *string* | :heavy_minus_sign: | The name of the base model to fine-tune. You can select one of "ada",
"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21.
To learn more about these models, see the
[Models](https://platform.openai.com/docs/models) documentation.
| | +| `model` | *any* | :heavy_minus_sign: | The name of the base model to fine-tune. You can select one of "ada",
"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21.
To learn more about these models, see the
[Models](https://platform.openai.com/docs/models) documentation.
| | | `nEpochs` | *number* | :heavy_minus_sign: | The number of epochs to train the model for. An epoch refers to one
full cycle through the training dataset.
| | | `promptLossWeight` | *number* | :heavy_minus_sign: | The weight to use for loss on the prompt tokens. This controls how
much the model tries to learn to generate the prompt (as compared
to the completion which always has a weight of 1.0), and can add
a stabilizing effect to training when completions are short.

If prompts are extremely long (relative to completions), it may make
sense to reduce this weight so as to avoid over-prioritizing
learning the prompt.
| | | `suffix` | *string* | :heavy_minus_sign: | A string of up to 40 characters that will be added to your fine-tuned model name.

For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
| | diff --git a/docs/models/shared/createfinetunerequestmodel2.md b/docs/models/shared/createfinetunerequestmodel2.md new file mode 100755 index 0000000..394d64f --- /dev/null +++ b/docs/models/shared/createfinetunerequestmodel2.md @@ -0,0 +1,17 @@ +# CreateFineTuneRequestModel2 + +The name of the base model to fine-tune. You can select one of "ada", +"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. +To learn more about these models, see the +[Models](https://platform.openai.com/docs/models) documentation. + + + +## Values + +| Name | Value | +| --------- | --------- | +| `Ada` | ada | +| `Babbage` | babbage | +| `Curie` | curie | +| `Davinci` | davinci | \ No newline at end of file diff --git a/docs/models/shared/createmoderationrequest.md b/docs/models/shared/createmoderationrequest.md index 46e2e02..1b9a4be 100755 --- a/docs/models/shared/createmoderationrequest.md +++ b/docs/models/shared/createmoderationrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `input` | *any* | :heavy_check_mark: | The input text to classify | | -| `model` | *string* | :heavy_minus_sign: | Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.

The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
| text-moderation-stable | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input` | *any* | :heavy_check_mark: | The input text to classify | +| `model` | *any* | :heavy_minus_sign: | Two content 
moderation models are available: `text-moderation-stable` and `text-moderation-latest`.

The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
| \ No newline at end of file diff --git a/docs/models/shared/createmoderationrequestmodel2.md b/docs/models/shared/createmoderationrequestmodel2.md new file mode 100755 index 0000000..c5138bd --- /dev/null +++ b/docs/models/shared/createmoderationrequestmodel2.md @@ -0,0 +1,14 @@ +# CreateModerationRequestModel2 + +Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + +The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `TextModerationLatest` | text-moderation-latest | +| `TextModerationStable` | text-moderation-stable | \ No newline at end of file diff --git a/docs/models/shared/createsearchrequest.md b/docs/models/shared/createsearchrequest.md deleted file mode 100755 index d07acc8..0000000 --- a/docs/models/shared/createsearchrequest.md +++ /dev/null @@ -1,13 +0,0 @@ -# CreateSearchRequest - - -## Fields - -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `documents` | *string*[] | :heavy_minus_sign: | Up to 200 documents to search over, provided as a list of strings.

The maximum document length (in tokens) is 2034 minus the number of tokens in the query.

You should specify either `documents` or a `file`, but not both.
| | -| `file` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains documents to search over.

You should specify either `documents` or a `file`, but not both.
| | -| `maxRerank` | *number* | :heavy_minus_sign: | The maximum number of documents to be re-ranked and returned by search.

This flag only takes effect when `file` is set.
| | -| `query` | *string* | :heavy_check_mark: | Query to search against the documents. | the president | -| `returnMetadata` | *boolean* | :heavy_minus_sign: | A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a "metadata" field.

This flag only takes effect when `file` is set.
| | -| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/shared/createsearchresponse.md b/docs/models/shared/createsearchresponse.md deleted file mode 100755 index 3f9b362..0000000 --- a/docs/models/shared/createsearchresponse.md +++ /dev/null @@ -1,12 +0,0 @@ -# CreateSearchResponse - -OK - - -## Fields - -| Field | Type | Required | Description | -| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | -| `data` | [CreateSearchResponseData](../../models/shared/createsearchresponsedata.md)[] | :heavy_minus_sign: | N/A | -| `model` | *string* | :heavy_minus_sign: | N/A | -| `object` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createsearchresponsedata.md b/docs/models/shared/createsearchresponsedata.md deleted file mode 100755 index 4d79925..0000000 --- a/docs/models/shared/createsearchresponsedata.md +++ /dev/null @@ -1,10 +0,0 @@ -# CreateSearchResponseData - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `document` | *number* | :heavy_minus_sign: | N/A | -| `object` | *string* | :heavy_minus_sign: | N/A | -| `score` | *number* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequest.md b/docs/models/shared/createtranscriptionrequest.md index 9249593..3cea803 100755 --- a/docs/models/shared/createtranscriptionrequest.md +++ b/docs/models/shared/createtranscriptionrequest.md @@ -7,7 +7,7 @@ | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `file` | [CreateTranscriptionRequestFile](../../models/shared/createtranscriptionrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
| | `language` | *string* | :heavy_minus_sign: | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
| -| `model` | *string* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| +| `model` | *any* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| | `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
| | `responseFormat` | *string* | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| | `temperature` | *number* | :heavy_minus_sign: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
| \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequestmodel2.md b/docs/models/shared/createtranscriptionrequestmodel2.md new file mode 100755 index 0000000..5057fc9 --- /dev/null +++ b/docs/models/shared/createtranscriptionrequestmodel2.md @@ -0,0 +1,11 @@ +# CreateTranscriptionRequestModel2 + +ID of the model to use. Only `whisper-1` is currently available. + + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `Whisper1` | whisper-1 | \ No newline at end of file diff --git a/docs/models/shared/createtranslationrequest.md b/docs/models/shared/createtranslationrequest.md index 07dc70a..c870dd8 100755 --- a/docs/models/shared/createtranslationrequest.md +++ b/docs/models/shared/createtranslationrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `file` | [CreateTranslationRequestFile](../../models/shared/createtranslationrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
| -| `model` | *string* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| +| `model` | *any* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| | `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
| | `responseFormat` | *string* | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| | `temperature` | *number* | :heavy_minus_sign: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
| \ No newline at end of file diff --git a/docs/models/shared/createtranslationrequestmodel2.md b/docs/models/shared/createtranslationrequestmodel2.md new file mode 100755 index 0000000..51f965c --- /dev/null +++ b/docs/models/shared/createtranslationrequestmodel2.md @@ -0,0 +1,11 @@ +# CreateTranslationRequestModel2 + +ID of the model to use. Only `whisper-1` is currently available. + + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `Whisper1` | whisper-1 | \ No newline at end of file diff --git a/docs/models/shared/engine.md b/docs/models/shared/engine.md deleted file mode 100755 index 66a3ff8..0000000 --- a/docs/models/shared/engine.md +++ /dev/null @@ -1,13 +0,0 @@ -# Engine - -OK - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `created` | *number* | :heavy_check_mark: | N/A | -| `id` | *string* | :heavy_check_mark: | N/A | -| `object` | *string* | :heavy_check_mark: | N/A | -| `ready` | *boolean* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/listenginesresponse.md b/docs/models/shared/listenginesresponse.md deleted file mode 100755 index e89b7c4..0000000 --- a/docs/models/shared/listenginesresponse.md +++ /dev/null @@ -1,11 +0,0 @@ -# ListEnginesResponse - -OK - - -## Fields - -| Field | Type | Required | Description | -| ----------------------------------------- | ----------------------------------------- | ----------------------------------------- | ----------------------------------------- | -| `data` | [Engine](../../models/shared/engine.md)[] | :heavy_check_mark: | N/A | -| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index bb3c2f6..96ef7ef 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -8,21 +8,7 @@ The OpenAI REST API * [cancelFineTune](#cancelfinetune) - 
Immediately cancel a fine-tune job. -* [~~createAnswer~~](#createanswer) - Answers the specified question using the provided documents and examples. - -The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). - :warning: **Deprecated** * [createChatCompletion](#createchatcompletion) - Creates a model response for the given chat conversation. -* [~~createClassification~~](#createclassification) - Classifies the specified `query` using provided examples. - -The endpoint first [searches](/docs/api-reference/searches) over the labeled examples -to select the ones most relevant for the particular query. Then, the relevant examples -are combined with the query to construct a prompt to produce the final label via the -[completions](/docs/api-reference/completions) endpoint. - -Labeled examples can be provided via an uploaded `file`, or explicitly listed in the -request using the `examples` parameter for quick tests and small scale use cases. - :warning: **Deprecated** * [createCompletion](#createcompletion) - Creates a completion for the provided prompt and parameters. * [createEdit](#createedit) - Creates a new edit for the provided input, instruction, and parameters. * [createEmbedding](#createembedding) - Creates an embedding vector representing the input text. @@ -38,25 +24,17 @@ Response includes details of the enqueued job including job status and the name * [createImageEdit](#createimageedit) - Creates an edited or extended image given an original image and a prompt. * [createImageVariation](#createimagevariation) - Creates a variation of a given image. 
* [createModeration](#createmoderation) - Classifies if text violates OpenAI's Content Policy -* [~~createSearch~~](#createsearch) - The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. - -To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. - -The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. - :warning: **Deprecated** * [createTranscription](#createtranscription) - Transcribes audio into the input language. -* [createTranslation](#createtranslation) - Translates audio into into English. +* [createTranslation](#createtranslation) - Translates audio into English. * [deleteFile](#deletefile) - Delete a file. * [deleteModel](#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization. * [downloadFile](#downloadfile) - Returns the contents of the specified file -* [~~listEngines~~](#listengines) - Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. :warning: **Deprecated** * [listFiles](#listfiles) - Returns a list of files that belong to the user's organization. * [listFineTuneEvents](#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. * [listFineTunes](#listfinetunes) - List your organization's fine-tuning jobs * [listModels](#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. 
-* [~~retrieveEngine~~](#retrieveengine) - Retrieves a model instance, providing basic information about it such as the owner and availability. :warning: **Deprecated** * [retrieveFile](#retrievefile) - Returns information about a specific file. * [retrieveFineTune](#retrievefinetune) - Gets info about the fine-tune job. @@ -99,89 +77,6 @@ sdk.openAI.cancelFineTune({ **Promise<[operations.CancelFineTuneResponse](../../models/operations/cancelfinetuneresponse.md)>** -## ~~createAnswer~~ - -Answers the specified question using the provided documents and examples. - -The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). - - -> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateAnswerResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createAnswer({ - documents: [ - "provident", - "distinctio", - "quibusdam", - ], - examples: [ - [ - "corrupti", - "illum", - "vel", - "error", - ], - [ - "suscipit", - "iure", - "magnam", - ], - [ - "ipsa", - "delectus", - "tempora", - "suscipit", - ], - ], - examplesContext: "Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. 
border.", - expand: [ - "minus", - "placeat", - ], - file: "voluptatum", - logitBias: "iusto", - logprobs: 568045, - maxRerank: 392785, - maxTokens: 925597, - model: "temporibus", - n: 71036, - question: "What is the capital of Japan?", - returnMetadata: "quis", - returnPrompt: false, - searchModel: "veritatis", - stop: [ - "["\n"]", - ], - temperature: 3682.41, - user: "repellendus", -}).then((res: CreateAnswerResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | -| `request` | [shared.CreateAnswerRequest](../../models/shared/createanswerrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateAnswerResponse](../../models/operations/createanswerresponse.md)>** - - ## createChatCompletion Creates a model response for the given chat conversation. 
@@ -195,101 +90,86 @@ import { ChatCompletionRequestMessageRole, ChatCompletionResponseMessageRole, CreateChatCompletionRequestFunctionCall1, + CreateChatCompletionRequestModel2, CreateChatCompletionResponseChoicesFinishReason, } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); sdk.openAI.createChatCompletion({ - frequencyPenalty: 9571.56, + frequencyPenalty: 5488.14, functionCall: { - name: "Teri Strosin", + name: "Ellis Mitchell", }, functions: [ { - description: "quod", - name: "Deanna Sauer MD", + description: "vel", + name: "Doug Hoppe", parameters: { - "occaecati": "fugit", - "deleniti": "hic", - "optio": "totam", + "ipsa": "delectus", + "tempora": "suscipit", + "molestiae": "minus", + "placeat": "voluptatum", }, }, { - description: "beatae", - name: "Tanya Gleason", + description: "iusto", + name: "Charlie Walsh II", parameters: { - "esse": "ipsum", - "excepturi": "aspernatur", - "perferendis": "ad", + "deserunt": "perferendis", }, }, { - description: "natus", - name: "Sheryl Fadel", + description: "ipsam", + name: "Timmy Satterfield", parameters: { - "saepe": "fuga", - "in": "corporis", - "iste": "iure", - "saepe": "quidem", + "maiores": "molestiae", + "quod": "quod", + "esse": "totam", + "porro": "dolorum", }, }, { - description: "architecto", - name: "Lela Orn", + description: "dicta", + name: "Luke McCullough", parameters: { - "dolorem": "corporis", + "optio": "totam", + "beatae": "commodi", + "molestiae": "modi", + "qui": "impedit", }, }, ], logitBias: {}, - maxTokens: 128926, + maxTokens: 736918, messages: [ { - content: "enim", - functionCall: { - arguments: "omnis", - name: "Ms. 
Cathy Marks", - }, - name: "Darrin Brakus", - role: ChatCompletionRequestMessageRole.Assistant, - }, - { - content: "consequuntur", - functionCall: { - arguments: "repellat", - name: "Tracy Fritsch", - }, - name: "Shannon Mueller", - role: ChatCompletionRequestMessageRole.System, - }, - { - content: "laborum", + content: "ipsum", functionCall: { - arguments: "animi", - name: "Christina Satterfield", + arguments: "excepturi", + name: "Dorothy Hane", }, - name: "Mr. Alberta Schuster", + name: "Curtis Morissette", role: ChatCompletionRequestMessageRole.Function, }, { - content: "laborum", + content: "fuga", functionCall: { - arguments: "quasi", - name: "Jan Thiel", + arguments: "in", + name: "Sheryl Kertzmann", }, - name: "Jose Moen", - role: ChatCompletionRequestMessageRole.System, + name: "Brenda Wisozk", + role: ChatCompletionRequestMessageRole.Assistant, }, ], - model: "doloremque", + model: "gpt-3.5-turbo", n: 1, - presencePenalty: 4417.11, - stop: "maiores", + presencePenalty: 2103.82, + stop: "explicabo", stream: false, temperature: 1, topP: 1, - user: "dicta", + user: "nobis", }).then((res: CreateChatCompletionResponse) => { if (res.statusCode == 200) { // handle response @@ -310,76 +190,6 @@ sdk.openAI.createChatCompletion({ **Promise<[operations.CreateChatCompletionResponse](../../models/operations/createchatcompletionresponse.md)>** -## ~~createClassification~~ - -Classifies the specified `query` using provided examples. - -The endpoint first [searches](/docs/api-reference/searches) over the labeled examples -to select the ones most relevant for the particular query. Then, the relevant examples -are combined with the query to construct a prompt to produce the final label via the -[completions](/docs/api-reference/completions) endpoint. - -Labeled examples can be provided via an uploaded `file`, or explicitly listed in the -request using the `examples` parameter for quick tests and small scale use cases. 
- - -> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateClassificationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createClassification({ - examples: [ - [ - "iusto", - "dicta", - ], - [ - "enim", - "accusamus", - "commodi", - ], - ], - expand: "repudiandae", - file: "quae", - labels: [ - "quidem", - ], - logitBias: "molestias", - logprobs: "excepturi", - maxExamples: 865103, - model: "modi", - query: "The plot is not very attractive.", - returnMetadata: "praesentium", - returnPrompt: "rem", - searchModel: "voluptates", - temperature: 0, - user: "quasi", -}).then((res: CreateClassificationResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `request` | [shared.CreateClassificationRequest](../../models/shared/createclassificationrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateClassificationResponse](../../models/operations/createclassificationresponse.md)>** - - ## createCompletion Creates a completion for the provided prompt and parameters. @@ -389,22 +199,24 @@ Creates a completion for the provided prompt and parameters. 
```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateCompletionResponseChoicesFinishReason } from "@speakeasy-api/openai/dist/sdk/models/shared"; +import { CreateCompletionRequestModel2, CreateCompletionResponseChoicesFinishReason } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); sdk.openAI.createCompletion({ - bestOf: 921158, + bestOf: 315428, echo: false, - frequencyPenalty: 5759.47, + frequencyPenalty: 6078.31, logitBias: {}, - logprobs: 83112, + logprobs: 363711, maxTokens: 16, - model: "itaque", + model: "excepturi", n: 1, - presencePenalty: 2777.18, + presencePenalty: 384.25, prompt: [ "This is a test.", + "This is a test.", + "This is a test.", ], stop: [ "["\n"]", @@ -446,14 +258,14 @@ Creates a new edit for the provided input, instruction, and parameters. ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateEditResponseChoicesFinishReason } from "@speakeasy-api/openai/dist/sdk/models/shared"; +import { CreateEditRequestModel2, CreateEditResponseChoicesFinishReason } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); sdk.openAI.createEdit({ input: "What day of the wek is it?", instruction: "Fix the spelling mistakes.", - model: "explicabo", + model: "text-davinci-edit-001", n: 1, temperature: 1, topP: 1, @@ -486,17 +298,16 @@ Creates an embedding vector representing the input text. 
```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateEmbeddingResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { CreateEmbeddingRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); sdk.openAI.createEmbedding({ input: [ - 841386, - 289406, - 264730, + 635059, ], - model: "qui", - user: "aliquid", + model: "text-embedding-ada-002", + user: "repellat", }).then((res: CreateEmbeddingResponse) => { if (res.statusCode == 200) { // handle response @@ -532,10 +343,10 @@ const sdk = new Gpt(); sdk.openAI.createFile({ file: { - content: "cupiditate".encode(), - file: "quos", + content: "mollitia".encode(), + file: "occaecati", }, - purpose: "perferendis", + purpose: "numquam", }).then((res: CreateFileResponse) => { if (res.statusCode == 200) { // handle response @@ -570,25 +381,24 @@ Response includes details of the enqueued job including job status and the name ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { CreateFineTuneRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); sdk.openAI.createFineTune({ - batchSize: 164940, + batchSize: 414369, classificationBetas: [ - 3698.08, - 46.95, - 1464.41, - 6778.17, + 4746.97, + 2444.25, ], - classificationNClasses: 569618, - classificationPositiveClass: "tempora", + classificationNClasses: 623510, + classificationPositiveClass: "quia", computeClassificationMetrics: false, - learningRateMultiplier: 7037.37, - model: "tempore", - nEpochs: 288476, - promptLossWeight: 9621.89, - suffix: "eum", + learningRateMultiplier: 3380.07, + model: "curie", + nEpochs: 674752, + promptLossWeight: 6563.3, + suffix: "enim", trainingFile: "file-ajSREls59WBbvgSzJSVWxMCB", validationFile: "file-XjSREls59WBbvgSzJSVWxMCa", }).then((res: CreateFineTuneResponse) => { @@ -629,7 +439,7 @@ sdk.openAI.createImage({ prompt: "A 
cute baby sea otter", responseFormat: CreateImageRequestResponseFormat.Url, size: CreateImageRequestSize.OneThousandAndTwentyFourx1024, - user: "non", + user: "odit", }).then((res: CreateImageResponse) => { if (res.statusCode == 200) { // handle response @@ -664,18 +474,18 @@ const sdk = new Gpt(); sdk.openAI.createImageEdit({ image: { - content: "eligendi".encode(), - image: "sint", + content: "quo".encode(), + image: "sequi", }, mask: { - content: "aliquid".encode(), - mask: "provident", + content: "tenetur".encode(), + mask: "ipsam", }, - n: "necessitatibus", + n: "id", prompt: "A cute baby sea otter wearing a beret", - responseFormat: "sint", - size: "officia", - user: "dolor", + responseFormat: "possimus", + size: "aut", + user: "quasi", }).then((res: CreateImageEditResponse) => { if (res.statusCode == 200) { // handle response @@ -710,13 +520,13 @@ const sdk = new Gpt(); sdk.openAI.createImageVariation({ image: { - content: "debitis".encode(), - image: "a", + content: "error".encode(), + image: "temporibus", }, - n: "dolorum", - responseFormat: "in", - size: "in", - user: "illum", + n: "laborum", + responseFormat: "quasi", + size: "reiciendis", + user: "voluptatibus", }).then((res: CreateImageVariationResponse) => { if (res.statusCode == 200) { // handle response @@ -746,6 +556,7 @@ Classifies if text violates OpenAI's Content Policy ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateModerationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { CreateModerationRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); @@ -753,9 +564,8 @@ sdk.openAI.createModeration({ input: [ "I want to kill them.", "I want to kill them.", - "I want to kill them.", ], - model: "text-moderation-stable", + model: CreateModerationRequestModel2.TextModerationStable, }).then((res: CreateModerationResponse) => { if (res.statusCode == 200) { // handle response @@ -776,57 +586,6 @@ 
sdk.openAI.createModeration({ **Promise<[operations.CreateModerationResponse](../../models/operations/createmoderationresponse.md)>** -## ~~createSearch~~ - -The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. - -To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. - -The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. - - -> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateSearchResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.createSearch({ - createSearchRequest: { - documents: [ - "magnam", - ], - file: "cumque", - maxRerank: 813798, - query: "the president", - returnMetadata: false, - user: "ea", - }, - engineId: "davinci", -}).then((res: CreateSearchResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `request` | 
[operations.CreateSearchRequest](../../models/operations/createsearchrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateSearchResponse](../../models/operations/createsearchresponse.md)>** - - ## createTranscription Transcribes audio into the input language. @@ -836,19 +595,20 @@ Transcribes audio into the input language. ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateTranscriptionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { CreateTranscriptionRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); sdk.openAI.createTranscription({ file: { - content: "aliquid".encode(), - file: "laborum", + content: "voluptatibus".encode(), + file: "ipsa", }, - language: "accusamus", - model: "non", - prompt: "occaecati", - responseFormat: "enim", - temperature: 8817.36, + language: "omnis", + model: "whisper-1", + prompt: "cum", + responseFormat: "perferendis", + temperature: 391.87, }).then((res: CreateTranscriptionResponse) => { if (res.statusCode == 200) { // handle response @@ -871,25 +631,26 @@ sdk.openAI.createTranscription({ ## createTranslation -Translates audio into into English. +Translates audio into English. 
### Example Usage ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateTranslationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { CreateTranslationRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); sdk.openAI.createTranslation({ file: { - content: "delectus".encode(), - file: "quidem", + content: "reprehenderit".encode(), + file: "ut", }, - model: "provident", - prompt: "nam", - responseFormat: "id", - temperature: 5013.24, + model: CreateTranslationRequestModel2.Whisper1, + prompt: "dicta", + responseFormat: "corporis", + temperature: 2961.4, }).then((res: CreateTranslationResponse) => { if (res.statusCode == 200) { // handle response @@ -923,7 +684,7 @@ import { DeleteFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operat const sdk = new Gpt(); sdk.openAI.deleteFile({ - fileId: "deleniti", + fileId: "iusto", }).then((res: DeleteFileResponse) => { if (res.statusCode == 200) { // handle response @@ -991,7 +752,7 @@ import { DownloadFileResponse } from "@speakeasy-api/openai/dist/sdk/models/oper const sdk = new Gpt(); sdk.openAI.downloadFile({ - fileId: "sapiente", + fileId: "dicta", }).then((res: DownloadFileResponse) => { if (res.statusCode == 200) { // handle response @@ -1012,39 +773,6 @@ sdk.openAI.downloadFile({ **Promise<[operations.DownloadFileResponse](../../models/operations/downloadfileresponse.md)>** -## ~~listEngines~~ - -Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. - -> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { ListEnginesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.listEngines().then((res: ListEnginesResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.ListEnginesResponse](../../models/operations/listenginesresponse.md)>** - - ## listFiles Returns a list of files that belong to the user's organization. @@ -1175,42 +903,6 @@ sdk.openAI.listModels().then((res: ListModelsResponse) => { **Promise<[operations.ListModelsResponse](../../models/operations/listmodelsresponse.md)>** -## ~~retrieveEngine~~ - -Retrieves a model instance, providing basic information about it such as the owner and availability. - -> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { RetrieveEngineResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt(); - -sdk.openAI.retrieveEngine({ - engineId: "davinci", -}).then((res: RetrieveEngineResponse) => { - if (res.statusCode == 200) { - // handle response - } -}); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `request` | [operations.RetrieveEngineRequest](../../models/operations/retrieveenginerequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.RetrieveEngineResponse](../../models/operations/retrieveengineresponse.md)>** - - ## retrieveFile Returns information about a specific file. 
@@ -1224,7 +916,7 @@ import { RetrieveFileResponse } from "@speakeasy-api/openai/dist/sdk/models/oper const sdk = new Gpt(); sdk.openAI.retrieveFile({ - fileId: "amet", + fileId: "harum", }).then((res: RetrieveFileResponse) => { if (res.statusCode == 200) { // handle response diff --git a/files.gen b/files.gen index 9956f3b..e619681 100755 --- a/files.gen +++ b/files.gen @@ -19,9 +19,7 @@ src/sdk/types/index.ts src/sdk/types/rfcdate.ts tsconfig.json src/sdk/models/operations/cancelfinetune.ts -src/sdk/models/operations/createanswer.ts src/sdk/models/operations/createchatcompletion.ts -src/sdk/models/operations/createclassification.ts src/sdk/models/operations/createcompletion.ts src/sdk/models/operations/createedit.ts src/sdk/models/operations/createembedding.ts @@ -31,18 +29,15 @@ src/sdk/models/operations/createimage.ts src/sdk/models/operations/createimageedit.ts src/sdk/models/operations/createimagevariation.ts src/sdk/models/operations/createmoderation.ts -src/sdk/models/operations/createsearch.ts src/sdk/models/operations/createtranscription.ts src/sdk/models/operations/createtranslation.ts src/sdk/models/operations/deletefile.ts src/sdk/models/operations/deletemodel.ts src/sdk/models/operations/downloadfile.ts -src/sdk/models/operations/listengines.ts src/sdk/models/operations/listfiles.ts src/sdk/models/operations/listfinetuneevents.ts src/sdk/models/operations/listfinetunes.ts src/sdk/models/operations/listmodels.ts -src/sdk/models/operations/retrieveengine.ts src/sdk/models/operations/retrievefile.ts src/sdk/models/operations/retrievefinetune.ts src/sdk/models/operations/retrievemodel.ts @@ -50,15 +45,11 @@ src/sdk/models/operations/index.ts src/sdk/models/shared/finetune.ts src/sdk/models/shared/openaifile.ts src/sdk/models/shared/finetuneevent.ts -src/sdk/models/shared/createanswerresponse.ts -src/sdk/models/shared/createanswerrequest.ts src/sdk/models/shared/createchatcompletionresponse.ts src/sdk/models/shared/chatcompletionresponsemessage.ts 
src/sdk/models/shared/createchatcompletionrequest.ts src/sdk/models/shared/chatcompletionrequestmessage.ts src/sdk/models/shared/chatcompletionfunctions.ts -src/sdk/models/shared/createclassificationresponse.ts -src/sdk/models/shared/createclassificationrequest.ts src/sdk/models/shared/createcompletionresponse.ts src/sdk/models/shared/createcompletionrequest.ts src/sdk/models/shared/createeditresponse.ts @@ -73,16 +64,12 @@ src/sdk/models/shared/createimageeditrequest.ts src/sdk/models/shared/createimagevariationrequest.ts src/sdk/models/shared/createmoderationresponse.ts src/sdk/models/shared/createmoderationrequest.ts -src/sdk/models/shared/createsearchresponse.ts -src/sdk/models/shared/createsearchrequest.ts src/sdk/models/shared/createtranscriptionresponse.ts src/sdk/models/shared/createtranscriptionrequest.ts src/sdk/models/shared/createtranslationresponse.ts src/sdk/models/shared/createtranslationrequest.ts src/sdk/models/shared/deletefileresponse.ts src/sdk/models/shared/deletemodelresponse.ts -src/sdk/models/shared/listenginesresponse.ts -src/sdk/models/shared/engine.ts src/sdk/models/shared/listfilesresponse.ts src/sdk/models/shared/listfinetuneeventsresponse.ts src/sdk/models/shared/listfinetunesresponse.ts @@ -94,9 +81,7 @@ docs/sdks/openai/README.md USAGE.md docs/models/operations/cancelfinetunerequest.md docs/models/operations/cancelfinetuneresponse.md -docs/models/operations/createanswerresponse.md docs/models/operations/createchatcompletionresponse.md -docs/models/operations/createclassificationresponse.md docs/models/operations/createcompletionresponse.md docs/models/operations/createeditresponse.md docs/models/operations/createembeddingresponse.md @@ -106,8 +91,6 @@ docs/models/operations/createimageresponse.md docs/models/operations/createimageeditresponse.md docs/models/operations/createimagevariationresponse.md docs/models/operations/createmoderationresponse.md -docs/models/operations/createsearchrequest.md 
-docs/models/operations/createsearchresponse.md docs/models/operations/createtranscriptionresponse.md docs/models/operations/createtranslationresponse.md docs/models/operations/deletefilerequest.md @@ -116,14 +99,11 @@ docs/models/operations/deletemodelrequest.md docs/models/operations/deletemodelresponse.md docs/models/operations/downloadfilerequest.md docs/models/operations/downloadfileresponse.md -docs/models/operations/listenginesresponse.md docs/models/operations/listfilesresponse.md docs/models/operations/listfinetuneeventsrequest.md docs/models/operations/listfinetuneeventsresponse.md docs/models/operations/listfinetunesresponse.md docs/models/operations/listmodelsresponse.md -docs/models/operations/retrieveenginerequest.md -docs/models/operations/retrieveengineresponse.md docs/models/operations/retrievefilerequest.md docs/models/operations/retrievefileresponse.md docs/models/operations/retrievefinetunerequest.md @@ -135,9 +115,6 @@ docs/models/shared/finetune.md docs/models/shared/openaifilestatusdetails.md docs/models/shared/openaifile.md docs/models/shared/finetuneevent.md -docs/models/shared/createanswerresponseselecteddocuments.md -docs/models/shared/createanswerresponse.md -docs/models/shared/createanswerrequest.md docs/models/shared/createchatcompletionresponsechoicesfinishreason.md docs/models/shared/createchatcompletionresponsechoices.md docs/models/shared/createchatcompletionresponseusage.md @@ -148,14 +125,12 @@ docs/models/shared/chatcompletionresponsemessage.md docs/models/shared/createchatcompletionrequestfunctioncall2.md docs/models/shared/createchatcompletionrequestfunctioncall1.md docs/models/shared/createchatcompletionrequestlogitbias.md +docs/models/shared/createchatcompletionrequestmodel2.md docs/models/shared/createchatcompletionrequest.md docs/models/shared/chatcompletionrequestmessagefunctioncall.md docs/models/shared/chatcompletionrequestmessagerole.md docs/models/shared/chatcompletionrequestmessage.md 
docs/models/shared/chatcompletionfunctions.md -docs/models/shared/createclassificationresponseselectedexamples.md -docs/models/shared/createclassificationresponse.md -docs/models/shared/createclassificationrequest.md docs/models/shared/createcompletionresponsechoicesfinishreason.md docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md docs/models/shared/createcompletionresponsechoiceslogprobs.md @@ -163,6 +138,7 @@ docs/models/shared/createcompletionresponsechoices.md docs/models/shared/createcompletionresponseusage.md docs/models/shared/createcompletionresponse.md docs/models/shared/createcompletionrequestlogitbias.md +docs/models/shared/createcompletionrequestmodel2.md docs/models/shared/createcompletionrequest.md docs/models/shared/createeditresponsechoicesfinishreason.md docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md @@ -170,13 +146,16 @@ docs/models/shared/createeditresponsechoiceslogprobs.md docs/models/shared/createeditresponsechoices.md docs/models/shared/createeditresponseusage.md docs/models/shared/createeditresponse.md +docs/models/shared/createeditrequestmodel2.md docs/models/shared/createeditrequest.md docs/models/shared/createembeddingresponsedata.md docs/models/shared/createembeddingresponseusage.md docs/models/shared/createembeddingresponse.md +docs/models/shared/createembeddingrequestmodel2.md docs/models/shared/createembeddingrequest.md docs/models/shared/createfilerequestfile.md docs/models/shared/createfilerequest.md +docs/models/shared/createfinetunerequestmodel2.md docs/models/shared/createfinetunerequest.md docs/models/shared/imagesresponsedata.md docs/models/shared/imagesresponse.md @@ -192,20 +171,18 @@ docs/models/shared/createmoderationresponseresultscategories.md docs/models/shared/createmoderationresponseresultscategoryscores.md docs/models/shared/createmoderationresponseresults.md docs/models/shared/createmoderationresponse.md +docs/models/shared/createmoderationrequestmodel2.md 
docs/models/shared/createmoderationrequest.md -docs/models/shared/createsearchresponsedata.md -docs/models/shared/createsearchresponse.md -docs/models/shared/createsearchrequest.md docs/models/shared/createtranscriptionresponse.md docs/models/shared/createtranscriptionrequestfile.md +docs/models/shared/createtranscriptionrequestmodel2.md docs/models/shared/createtranscriptionrequest.md docs/models/shared/createtranslationresponse.md docs/models/shared/createtranslationrequestfile.md +docs/models/shared/createtranslationrequestmodel2.md docs/models/shared/createtranslationrequest.md docs/models/shared/deletefileresponse.md docs/models/shared/deletemodelresponse.md -docs/models/shared/listenginesresponse.md -docs/models/shared/engine.md docs/models/shared/listfilesresponse.md docs/models/shared/listfinetuneeventsresponse.md docs/models/shared/listfinetunesresponse.md diff --git a/gen.yaml b/gen.yaml index 13347c6..f5e6fbb 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,16 +1,16 @@ configVersion: 1.0.0 management: - docChecksum: d0ce7d708c8bcb95aae605d27c99cb19 - docVersion: 1.3.0 - speakeasyVersion: 1.48.0 - generationVersion: 2.41.1 + docChecksum: 905cd76122997f2d869255c564530a31 + docVersion: 2.0.0 + speakeasyVersion: 1.49.0 + generationVersion: 2.41.4 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 1.13.1 + version: 2.0.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 035449f..149c67d 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "1.13.1", + "version": "2.0.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "1.13.1", + "version": "2.0.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 6d5c6e8..a650259 100755 --- 
a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "1.13.1", + "version": "2.0.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/operations/createanswer.ts b/src/sdk/models/operations/createanswer.ts deleted file mode 100755 index a3d5554..0000000 --- a/src/sdk/models/operations/createanswer.ts +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import * as shared from "../shared"; -import { AxiosResponse } from "axios"; - -export class CreateAnswerResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; - - /** - * OK - */ - @SpeakeasyMetadata() - createAnswerResponse?: shared.CreateAnswerResponse; - - @SpeakeasyMetadata() - statusCode: number; - - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; -} diff --git a/src/sdk/models/operations/createclassification.ts b/src/sdk/models/operations/createclassification.ts deleted file mode 100755 index 97c359f..0000000 --- a/src/sdk/models/operations/createclassification.ts +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
- */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import * as shared from "../shared"; -import { AxiosResponse } from "axios"; - -export class CreateClassificationResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; - - /** - * OK - */ - @SpeakeasyMetadata() - createClassificationResponse?: shared.CreateClassificationResponse; - - @SpeakeasyMetadata() - statusCode: number; - - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; -} diff --git a/src/sdk/models/operations/createsearch.ts b/src/sdk/models/operations/createsearch.ts deleted file mode 100755 index 8452f3e..0000000 --- a/src/sdk/models/operations/createsearch.ts +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import * as shared from "../shared"; -import { AxiosResponse } from "axios"; - -export class CreateSearchRequest extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "request, media_type=application/json" }) - createSearchRequest: shared.CreateSearchRequest; - - /** - * The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`. 
- */ - @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=engine_id" }) - engineId: string; -} - -export class CreateSearchResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; - - /** - * OK - */ - @SpeakeasyMetadata() - createSearchResponse?: shared.CreateSearchResponse; - - @SpeakeasyMetadata() - statusCode: number; - - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; -} diff --git a/src/sdk/models/operations/index.ts b/src/sdk/models/operations/index.ts index a8bf1e0..e2fa564 100755 --- a/src/sdk/models/operations/index.ts +++ b/src/sdk/models/operations/index.ts @@ -3,9 +3,7 @@ */ export * from "./cancelfinetune"; -export * from "./createanswer"; export * from "./createchatcompletion"; -export * from "./createclassification"; export * from "./createcompletion"; export * from "./createedit"; export * from "./createembedding"; @@ -15,18 +13,15 @@ export * from "./createimage"; export * from "./createimageedit"; export * from "./createimagevariation"; export * from "./createmoderation"; -export * from "./createsearch"; export * from "./createtranscription"; export * from "./createtranslation"; export * from "./deletefile"; export * from "./deletemodel"; export * from "./downloadfile"; -export * from "./listengines"; export * from "./listfiles"; export * from "./listfinetuneevents"; export * from "./listfinetunes"; export * from "./listmodels"; -export * from "./retrieveengine"; export * from "./retrievefile"; export * from "./retrievefinetune"; export * from "./retrievemodel"; diff --git a/src/sdk/models/operations/listengines.ts b/src/sdk/models/operations/listengines.ts deleted file mode 100755 index 6d1b335..0000000 --- a/src/sdk/models/operations/listengines.ts +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
- */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import * as shared from "../shared"; -import { AxiosResponse } from "axios"; - -export class ListEnginesResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; - - /** - * OK - */ - @SpeakeasyMetadata() - listEnginesResponse?: shared.ListEnginesResponse; - - @SpeakeasyMetadata() - statusCode: number; - - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; -} diff --git a/src/sdk/models/operations/retrieveengine.ts b/src/sdk/models/operations/retrieveengine.ts deleted file mode 100755 index 9606bd0..0000000 --- a/src/sdk/models/operations/retrieveengine.ts +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import * as shared from "../shared"; -import { AxiosResponse } from "axios"; - -export class RetrieveEngineRequest extends SpeakeasyBase { - /** - * The ID of the engine to use for this request - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=engine_id" }) - engineId: string; -} - -export class RetrieveEngineResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - contentType: string; - - /** - * OK - */ - @SpeakeasyMetadata() - engine?: shared.Engine; - - @SpeakeasyMetadata() - statusCode: number; - - @SpeakeasyMetadata() - rawResponse?: AxiosResponse; -} diff --git a/src/sdk/models/shared/createanswerrequest.ts b/src/sdk/models/shared/createanswerrequest.ts deleted file mode 100755 index bb0037a..0000000 --- a/src/sdk/models/shared/createanswerrequest.ts +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
- */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; - -export class CreateAnswerRequest extends SpeakeasyBase { - /** - * List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. - * - * @remarks - * - * You should specify either `documents` or a `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "documents" }) - documents?: string[]; - - /** - * List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. - */ - @SpeakeasyMetadata() - @Expose({ name: "examples" }) - examples: string[][]; - - /** - * A text snippet containing the contextual information used to generate the answers for the `examples` you provide. - */ - @SpeakeasyMetadata() - @Expose({ name: "examples_context" }) - examplesContext: string; - - /** - * If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. - */ - @SpeakeasyMetadata() - @Expose({ name: "expand" }) - expand?: any[]; - - /** - * The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. - * - * @remarks - * - * You should specify either `documents` or a `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "file" }) - file?: string; - - @SpeakeasyMetadata() - @Expose({ name: "logit_bias" }) - logitBias?: any; - - /** - * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - * - * @remarks - * - * The maximum value for `logprobs` is 5. - * - * When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "logprobs" }) - logprobs?: number; - - /** - * The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. - */ - @SpeakeasyMetadata() - @Expose({ name: "max_rerank" }) - maxRerank?: number; - - /** - * The maximum number of tokens allowed for the generated answer - */ - @SpeakeasyMetadata() - @Expose({ name: "max_tokens" }) - maxTokens?: number; - - /** - * ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. - */ - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: string; - - /** - * How many answers to generate for each question. - */ - @SpeakeasyMetadata() - @Expose({ name: "n" }) - n?: number; - - /** - * Question to get answered. - */ - @SpeakeasyMetadata() - @Expose({ name: "question" }) - question: string; - - @SpeakeasyMetadata() - @Expose({ name: "return_metadata" }) - returnMetadata?: any; - - /** - * If set to `true`, the returned JSON will include a "prompt" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. - */ - @SpeakeasyMetadata() - @Expose({ name: "return_prompt" }) - returnPrompt?: boolean; - - /** - * ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. 
- */ - @SpeakeasyMetadata() - @Expose({ name: "search_model" }) - searchModel?: string; - - /** - * completions_stop_description - */ - @SpeakeasyMetadata() - @Expose({ name: "stop" }) - stop?: any; - - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - */ - @SpeakeasyMetadata() - @Expose({ name: "temperature" }) - temperature?: number; - - @SpeakeasyMetadata() - @Expose({ name: "user" }) - user?: any; -} diff --git a/src/sdk/models/shared/createanswerresponse.ts b/src/sdk/models/shared/createanswerresponse.ts deleted file mode 100755 index cb1b306..0000000 --- a/src/sdk/models/shared/createanswerresponse.ts +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose, Type } from "class-transformer"; - -export class CreateAnswerResponseSelectedDocuments extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "document" }) - document?: number; - - @SpeakeasyMetadata() - @Expose({ name: "text" }) - text?: string; -} - -/** - * OK - */ -export class CreateAnswerResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "answers" }) - answers?: string[]; - - @SpeakeasyMetadata() - @Expose({ name: "completion" }) - completion?: string; - - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model?: string; - - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object?: string; - - @SpeakeasyMetadata() - @Expose({ name: "search_model" }) - searchModel?: string; - - @SpeakeasyMetadata({ elemType: CreateAnswerResponseSelectedDocuments }) - @Expose({ name: "selected_documents" }) - @Type(() => CreateAnswerResponseSelectedDocuments) - selectedDocuments?: CreateAnswerResponseSelectedDocuments[]; -} diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts 
b/src/sdk/models/shared/createchatcompletionrequest.ts index e350233..b93376b 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -37,6 +37,20 @@ export enum CreateChatCompletionRequestFunctionCall1 { */ export class CreateChatCompletionRequestLogitBias extends SpeakeasyBase {} +/** + * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + */ +export enum CreateChatCompletionRequestModel2 { + Gpt4 = "gpt-4", + Gpt40613 = "gpt-4-0613", + Gpt432k = "gpt-4-32k", + Gpt432k0613 = "gpt-4-32k-0613", + Gpt35Turbo = "gpt-3.5-turbo", + Gpt35Turbo16k = "gpt-3.5-turbo-16k", + Gpt35Turbo0613 = "gpt-3.5-turbo-0613", + Gpt35Turbo16k0613 = "gpt-3.5-turbo-16k-0613", +} + export class CreateChatCompletionRequest extends SpeakeasyBase { /** * completions_frequency_penalty_description @@ -98,7 +112,7 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "model" }) - model: string; + model: any; /** * How many chat completion choices to generate for each input message. diff --git a/src/sdk/models/shared/createclassificationrequest.ts b/src/sdk/models/shared/createclassificationrequest.ts deleted file mode 100755 index 4e99161..0000000 --- a/src/sdk/models/shared/createclassificationrequest.ts +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; - -export class CreateClassificationRequest extends SpeakeasyBase { - /** - * A list of examples with labels, in the following format: - * - * @remarks - * - * `[["The movie is so interesting.", "Positive"], ["It is quite boring.", "Negative"], ...]` - * - * All the label strings will be normalized to be capitalized. 
- * - * You should specify either `examples` or `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "examples" }) - examples?: string[][]; - - @SpeakeasyMetadata() - @Expose({ name: "expand" }) - expand?: any; - - /** - * The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. - * - * @remarks - * - * You should specify either `examples` or `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "file" }) - file?: string; - - /** - * The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized. - */ - @SpeakeasyMetadata() - @Expose({ name: "labels" }) - labels?: string[]; - - @SpeakeasyMetadata() - @Expose({ name: "logit_bias" }) - logitBias?: any; - - @SpeakeasyMetadata() - @Expose({ name: "logprobs" }) - logprobs?: any; - - /** - * The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. - */ - @SpeakeasyMetadata() - @Expose({ name: "max_examples" }) - maxExamples?: number; - - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: any; - - /** - * Query to be classified. - */ - @SpeakeasyMetadata() - @Expose({ name: "query" }) - query: string; - - @SpeakeasyMetadata() - @Expose({ name: "return_metadata" }) - returnMetadata?: any; - - @SpeakeasyMetadata() - @Expose({ name: "return_prompt" }) - returnPrompt?: any; - - @SpeakeasyMetadata() - @Expose({ name: "search_model" }) - searchModel?: any; - - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
- */ - @SpeakeasyMetadata() - @Expose({ name: "temperature" }) - temperature?: number; - - @SpeakeasyMetadata() - @Expose({ name: "user" }) - user?: any; -} diff --git a/src/sdk/models/shared/createclassificationresponse.ts b/src/sdk/models/shared/createclassificationresponse.ts deleted file mode 100755 index bc9ad9b..0000000 --- a/src/sdk/models/shared/createclassificationresponse.ts +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose, Type } from "class-transformer"; - -export class CreateClassificationResponseSelectedExamples extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "document" }) - document?: number; - - @SpeakeasyMetadata() - @Expose({ name: "label" }) - label?: string; - - @SpeakeasyMetadata() - @Expose({ name: "text" }) - text?: string; -} - -/** - * OK - */ -export class CreateClassificationResponse extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "completion" }) - completion?: string; - - @SpeakeasyMetadata() - @Expose({ name: "label" }) - label?: string; - - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model?: string; - - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object?: string; - - @SpeakeasyMetadata() - @Expose({ name: "search_model" }) - searchModel?: string; - - @SpeakeasyMetadata({ elemType: CreateClassificationResponseSelectedExamples }) - @Expose({ name: "selected_examples" }) - @Type(() => CreateClassificationResponseSelectedExamples) - selectedExamples?: CreateClassificationResponseSelectedExamples[]; -} diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts index 528f8af..7bfa3bd 100755 --- a/src/sdk/models/shared/createcompletionrequest.ts +++ b/src/sdk/models/shared/createcompletionrequest.ts @@ -17,6 +17,22 @@ import { Expose, Type } from "class-transformer"; */ export class 
CreateCompletionRequestLogitBias extends SpeakeasyBase {} +/** + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + * + * @remarks + * + */ +export enum CreateCompletionRequestModel2 { + TextDavinci003 = "text-davinci-003", + TextDavinci002 = "text-davinci-002", + TextDavinci001 = "text-davinci-001", + CodeDavinci002 = "code-davinci-002", + TextCurie001 = "text-curie-001", + TextBabbage001 = "text-babbage-001", + TextAda001 = "text-ada-001", +} + export class CreateCompletionRequest extends SpeakeasyBase { /** * Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. @@ -95,10 +111,13 @@ export class CreateCompletionRequest extends SpeakeasyBase { /** * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + * + * @remarks + * */ @SpeakeasyMetadata() @Expose({ name: "model" }) - model: string; + model: any; /** * How many completions to generate for each prompt. diff --git a/src/sdk/models/shared/createeditrequest.ts b/src/sdk/models/shared/createeditrequest.ts index 5ab2b38..2a99b1a 100755 --- a/src/sdk/models/shared/createeditrequest.ts +++ b/src/sdk/models/shared/createeditrequest.ts @@ -5,6 +5,14 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; +/** + * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. 
+ */ +export enum CreateEditRequestModel2 { + TextDavinciEdit001 = "text-davinci-edit-001", + CodeDavinciEdit001 = "code-davinci-edit-001", +} + export class CreateEditRequest extends SpeakeasyBase { /** * The input text to use as a starting point for the edit. @@ -25,7 +33,7 @@ export class CreateEditRequest extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "model" }) - model: string; + model: any; /** * How many edits to generate for the input and instruction. diff --git a/src/sdk/models/shared/createembeddingrequest.ts b/src/sdk/models/shared/createembeddingrequest.ts index 8c25804..d3beb82 100755 --- a/src/sdk/models/shared/createembeddingrequest.ts +++ b/src/sdk/models/shared/createembeddingrequest.ts @@ -5,6 +5,13 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; +/** + * model_description + */ +export enum CreateEmbeddingRequestModel2 { + TextEmbeddingAda002 = "text-embedding-ada-002", +} + export class CreateEmbeddingRequest extends SpeakeasyBase { /** * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. 
@@ -16,6 +23,9 @@ export class CreateEmbeddingRequest extends SpeakeasyBase { @Expose({ name: "input" }) input: any; + /** + * model_description + */ @SpeakeasyMetadata() @Expose({ name: "model" }) model: any; diff --git a/src/sdk/models/shared/createfinetunerequest.ts b/src/sdk/models/shared/createfinetunerequest.ts index bc4f1b1..6414ac8 100755 --- a/src/sdk/models/shared/createfinetunerequest.ts +++ b/src/sdk/models/shared/createfinetunerequest.ts @@ -5,6 +5,22 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; +/** + * The name of the base model to fine-tune. You can select one of "ada", + * + * @remarks + * "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. + * To learn more about these models, see the + * [Models](https://platform.openai.com/docs/models) documentation. + * + */ +export enum CreateFineTuneRequestModel2 { + Ada = "ada", + Babbage = "babbage", + Curie = "curie", + Davinci = "davinci", +} + export class CreateFineTuneRequest extends SpeakeasyBase { /** * The batch size to use for training. The batch size is the number of @@ -110,7 +126,7 @@ export class CreateFineTuneRequest extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "model" }) - model?: string; + model?: any; /** * The number of epochs to train the model for. An epoch refers to one diff --git a/src/sdk/models/shared/createmoderationrequest.ts b/src/sdk/models/shared/createmoderationrequest.ts index 91368ec..a1db94a 100755 --- a/src/sdk/models/shared/createmoderationrequest.ts +++ b/src/sdk/models/shared/createmoderationrequest.ts @@ -5,6 +5,19 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; +/** + * Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. 
+ * + * @remarks + * + * The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + * + */ +export enum CreateModerationRequestModel2 { + TextModerationLatest = "text-moderation-latest", + TextModerationStable = "text-moderation-stable", +} + export class CreateModerationRequest extends SpeakeasyBase { /** * The input text to classify @@ -23,5 +36,5 @@ export class CreateModerationRequest extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "model" }) - model?: string; + model?: any; } diff --git a/src/sdk/models/shared/createsearchrequest.ts b/src/sdk/models/shared/createsearchrequest.ts deleted file mode 100755 index 08bec4f..0000000 --- a/src/sdk/models/shared/createsearchrequest.ts +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; - -export class CreateSearchRequest extends SpeakeasyBase { - /** - * Up to 200 documents to search over, provided as a list of strings. - * - * @remarks - * - * The maximum document length (in tokens) is 2034 minus the number of tokens in the query. - * - * You should specify either `documents` or a `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "documents" }) - documents?: string[]; - - /** - * The ID of an uploaded file that contains documents to search over. - * - * @remarks - * - * You should specify either `documents` or a `file`, but not both. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "file" }) - file?: string; - - /** - * The maximum number of documents to be re-ranked and returned by search. 
- * - * @remarks - * - * This flag only takes effect when `file` is set. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "max_rerank" }) - maxRerank?: number; - - /** - * Query to search against the documents. - */ - @SpeakeasyMetadata() - @Expose({ name: "query" }) - query: string; - - /** - * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a "metadata" field. - * - * @remarks - * - * This flag only takes effect when `file` is set. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "return_metadata" }) - returnMetadata?: boolean; - - @SpeakeasyMetadata() - @Expose({ name: "user" }) - user?: any; -} diff --git a/src/sdk/models/shared/createsearchresponse.ts b/src/sdk/models/shared/createsearchresponse.ts deleted file mode 100755 index 14dd5fc..0000000 --- a/src/sdk/models/shared/createsearchresponse.ts +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose, Type } from "class-transformer"; - -export class CreateSearchResponseData extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "document" }) - document?: number; - - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object?: string; - - @SpeakeasyMetadata() - @Expose({ name: "score" }) - score?: number; -} - -/** - * OK - */ -export class CreateSearchResponse extends SpeakeasyBase { - @SpeakeasyMetadata({ elemType: CreateSearchResponseData }) - @Expose({ name: "data" }) - @Type(() => CreateSearchResponseData) - data?: CreateSearchResponseData[]; - - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model?: string; - - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object?: string; -} diff --git a/src/sdk/models/shared/createtranscriptionrequest.ts b/src/sdk/models/shared/createtranscriptionrequest.ts index daf233c..a5f7c1f 100755 --- 
a/src/sdk/models/shared/createtranscriptionrequest.ts +++ b/src/sdk/models/shared/createtranscriptionrequest.ts @@ -12,6 +12,16 @@ export class CreateTranscriptionRequestFile extends SpeakeasyBase { file: string; } +/** + * ID of the model to use. Only `whisper-1` is currently available. + * + * @remarks + * + */ +export enum CreateTranscriptionRequestModel2 { + Whisper1 = "whisper-1", +} + export class CreateTranscriptionRequest extends SpeakeasyBase { /** * The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. @@ -37,8 +47,8 @@ export class CreateTranscriptionRequest extends SpeakeasyBase { * @remarks * */ - @SpeakeasyMetadata({ data: "multipart_form, name=model" }) - model: string; + @SpeakeasyMetadata({ data: "multipart_form, name=model;json=true" }) + model: any; /** * An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. diff --git a/src/sdk/models/shared/createtranslationrequest.ts b/src/sdk/models/shared/createtranslationrequest.ts index 7c7dccd..f085a94 100755 --- a/src/sdk/models/shared/createtranslationrequest.ts +++ b/src/sdk/models/shared/createtranslationrequest.ts @@ -12,6 +12,16 @@ export class CreateTranslationRequestFile extends SpeakeasyBase { file: string; } +/** + * ID of the model to use. Only `whisper-1` is currently available. + * + * @remarks + * + */ +export enum CreateTranslationRequestModel2 { + Whisper1 = "whisper-1", +} + export class CreateTranslationRequest extends SpeakeasyBase { /** * The audio file object (not file name) translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. 
@@ -28,8 +38,8 @@ export class CreateTranslationRequest extends SpeakeasyBase { * @remarks * */ - @SpeakeasyMetadata({ data: "multipart_form, name=model" }) - model: string; + @SpeakeasyMetadata({ data: "multipart_form, name=model;json=true" }) + model: any; /** * An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. diff --git a/src/sdk/models/shared/engine.ts b/src/sdk/models/shared/engine.ts deleted file mode 100755 index 7b329fd..0000000 --- a/src/sdk/models/shared/engine.ts +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; - -/** - * OK - */ -export class Engine extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "created" }) - created: number; - - @SpeakeasyMetadata() - @Expose({ name: "id" }) - id: string; - - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; - - @SpeakeasyMetadata() - @Expose({ name: "ready" }) - ready: boolean; -} diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index 0342142..35712b6 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -5,12 +5,8 @@ export * from "./chatcompletionfunctions"; export * from "./chatcompletionrequestmessage"; export * from "./chatcompletionresponsemessage"; -export * from "./createanswerrequest"; -export * from "./createanswerresponse"; export * from "./createchatcompletionrequest"; export * from "./createchatcompletionresponse"; -export * from "./createclassificationrequest"; -export * from "./createclassificationresponse"; export * from "./createcompletionrequest"; export * from "./createcompletionresponse"; export * from "./createeditrequest"; @@ -24,19 +20,15 @@ export * from "./createimagerequest"; export * from 
"./createimagevariationrequest"; export * from "./createmoderationrequest"; export * from "./createmoderationresponse"; -export * from "./createsearchrequest"; -export * from "./createsearchresponse"; export * from "./createtranscriptionrequest"; export * from "./createtranscriptionresponse"; export * from "./createtranslationrequest"; export * from "./createtranslationresponse"; export * from "./deletefileresponse"; export * from "./deletemodelresponse"; -export * from "./engine"; export * from "./finetune"; export * from "./finetuneevent"; export * from "./imagesresponse"; -export * from "./listenginesresponse"; export * from "./listfilesresponse"; export * from "./listfinetuneeventsresponse"; export * from "./listfinetunesresponse"; diff --git a/src/sdk/models/shared/listenginesresponse.ts b/src/sdk/models/shared/listenginesresponse.ts deleted file mode 100755 index ce86403..0000000 --- a/src/sdk/models/shared/listenginesresponse.ts +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Engine } from "./engine"; -import { Expose, Type } from "class-transformer"; - -/** - * OK - */ -export class ListEnginesResponse extends SpeakeasyBase { - @SpeakeasyMetadata({ elemType: Engine }) - @Expose({ name: "data" }) - @Type(() => Engine) - data: Engine[]; - - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; -} diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index f3daa91..635bb08 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -76,84 +76,6 @@ export class OpenAI { return res; } - /** - * Answers the specified question using the provided documents and examples. - * - * The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. 
The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). - * - * - * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible - */ - async createAnswer( - req: shared.CreateAnswerRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateAnswerRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/answers"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateAnswerResponse = new operations.CreateAnswerResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createAnswerResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.CreateAnswerResponse - ); - } - break; - } - - return res; - } - /** * Creates a model response for the given chat conversation. */ @@ -228,91 +150,6 @@ export class OpenAI { return res; } - /** - * Classifies the specified `query` using provided examples. - * - * The endpoint first [searches](/docs/api-reference/searches) over the labeled examples - * to select the ones most relevant for the particular query. Then, the relevant examples - * are combined with the query to construct a prompt to produce the final label via the - * [completions](/docs/api-reference/completions) endpoint. - * - * Labeled examples can be provided via an uploaded `file`, or explicitly listed in the - * request using the `examples` parameter for quick tests and small scale use cases. 
- * - * - * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible - */ - async createClassification( - req: shared.CreateClassificationRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateClassificationRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/classifications"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateClassificationResponse = - new operations.CreateClassificationResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createClassificationResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.CreateClassificationResponse - ); - } - break; - } - - return res; - } - /** * Creates a completion for the provided prompt and parameters. */ @@ -971,90 +808,6 @@ export class OpenAI { return res; } - /** - * The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. - * - * To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. - * - * The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. 
- * - * - * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible - */ - async createSearch( - req: operations.CreateSearchRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.CreateSearchRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL(baseURL, "/engines/{engine_id}/search", req); - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody( - req, - "createSearchRequest", - "json" - ); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - - const headers = { ...reqBodyHeaders, ...config?.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); - headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateSearchResponse = new operations.CreateSearchResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createSearchResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.CreateSearchResponse - ); - } - break; - } - - return res; - } - /** * Transcribes audio into the input language. */ @@ -1130,7 +883,7 @@ export class OpenAI { } /** - * Translates audio into into English. + * Translates audio into English. */ async createTranslation( req: shared.CreateTranslationRequest, @@ -1379,61 +1132,6 @@ export class OpenAI { return res; } - /** - * Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability. 
- * - * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible - */ - async listEngines(config?: AxiosRequestConfig): Promise { - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/engines"; - - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - - const headers = { ...config?.headers }; - headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.ListEnginesResponse = new operations.ListEnginesResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listEnginesResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.ListEnginesResponse - ); - } - break; - } - - return res; - } - /** * Returns a list of files that belong to the user's organization. */ @@ -1657,65 +1355,6 @@ export class OpenAI { return res; } - /** - * Retrieves a model instance, providing basic information about it such as the owner and availability. 
- * - * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible - */ - async retrieveEngine( - req: operations.RetrieveEngineRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.RetrieveEngineRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL(baseURL, "/engines/{engine_id}", req); - - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - - const headers = { ...config?.headers }; - headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.RetrieveEngineResponse = new operations.RetrieveEngineResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.engine = utils.objectToClass(JSON.parse(decodedRes), shared.Engine); - } - break; - } - - return res; - } - /** * Returns information about a specific file. 
*/ diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 785e5e7..1661d7a 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -37,9 +37,9 @@ export class SDKConfiguration { serverURL: string; serverDefaults: any; language = "typescript"; - openapiDocVersion = "1.3.0"; - sdkVersion = "1.13.1"; - genVersion = "2.41.1"; + openapiDocVersion = "2.0.0"; + sdkVersion = "2.0.0"; + genVersion = "2.41.4"; public constructor(init?: Partial) { Object.assign(this, init); From 27c11ac27a174b8a91cf560e22b2f6042d056d9f Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 21 Jun 2023 01:26:00 +0000 Subject: [PATCH 10/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.49.1 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 0fcf71a..6abfddd 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -220,4 +220,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.49.0 (2.41.4) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.0.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.0.0 - . \ No newline at end of file +- [NPM v2.0.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.0.0 - . + +## 2023-06-21 01:25:35 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.49.1 (2.41.5) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.0.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.0.1 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index f5e6fbb..f3740fb 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 905cd76122997f2d869255c564530a31 docVersion: 2.0.0 - speakeasyVersion: 1.49.0 - generationVersion: 2.41.4 + speakeasyVersion: 1.49.1 + generationVersion: 2.41.5 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.0.0 + version: 2.0.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 149c67d..8b09977 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.0.0", + "version": "2.0.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.0.0", + "version": "2.0.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index a650259..b90baed 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.0.0", + "version": "2.0.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 1661d7a..18ba8ad 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.0.0"; - genVersion = "2.41.4"; + sdkVersion = "2.0.1"; + genVersion = "2.41.5"; public constructor(init?: Partial) { Object.assign(this, init); From 347e66429e69730f80ef71b1c3714ba1e55296ce Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 23 Jun 2023 01:19:30 +0000 Subject: [PATCH 11/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.50.1 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- 
src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 6abfddd..877f686 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -228,4 +228,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.49.1 (2.41.5) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.0.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.0.1 - . \ No newline at end of file +- [NPM v2.0.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.0.1 - . + +## 2023-06-23 01:19:10 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.50.1 (2.43.2) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.1.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.1.0 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index f3740fb..dfde43c 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 905cd76122997f2d869255c564530a31 docVersion: 2.0.0 - speakeasyVersion: 1.49.1 - generationVersion: 2.41.5 + speakeasyVersion: 1.50.1 + generationVersion: 2.43.2 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.0.1 + version: 2.1.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 8b09977..7600e22 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.0.1", + "version": "2.1.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.0.1", + "version": "2.1.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index b90baed..ca91061 100755 --- a/package.json +++ 
b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.0.1", + "version": "2.1.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 18ba8ad..72967ca 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.0.1"; - genVersion = "2.41.5"; + sdkVersion = "2.1.0"; + genVersion = "2.43.2"; public constructor(init?: Partial) { Object.assign(this, init); From 8d7ba4c6d5807cbbfd0f17cc3cb4e177d85289b7 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 27 Jun 2023 01:20:51 +0000 Subject: [PATCH 12/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.51.1 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 877f686..e065c65 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -236,4 +236,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.50.1 (2.43.2) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.1.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.1.0 - . \ No newline at end of file +- [NPM v2.1.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.1.0 - . + +## 2023-06-27 01:20:30 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.51.1 (2.50.2) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.2.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.2.0 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index dfde43c..0930064 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 905cd76122997f2d869255c564530a31 docVersion: 2.0.0 - speakeasyVersion: 1.50.1 - generationVersion: 2.43.2 + speakeasyVersion: 1.51.1 + generationVersion: 2.50.2 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.1.0 + version: 2.2.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 7600e22..9b54227 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.1.0", + "version": "2.2.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.1.0", + "version": "2.2.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index ca91061..369a22f 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.1.0", + "version": "2.2.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 72967ca..ada4e17 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.1.0"; - genVersion = "2.43.2"; + sdkVersion = "2.2.0"; + genVersion = "2.50.2"; public constructor(init?: Partial) { Object.assign(this, init); From f85e294b1e23eb04a1364293d06663044743a17b Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 29 Jun 2023 01:18:38 +0000 Subject: [PATCH 13/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.51.3 --- RELEASES.md | 10 +++++++++- docs/models/shared/chatcompletionfunctions.md | 10 +++++----- 
docs/models/shared/chatcompletionrequestmessage.md | 2 +- .../shared/chatcompletionrequestmessagefunctioncall.md | 4 ++-- .../models/shared/createchatcompletionrequestmodel2.md | 3 +++ gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/models/shared/chatcompletionfunctions.ts | 8 ++++++-- src/sdk/models/shared/chatcompletionrequestmessage.ts | 8 ++++---- src/sdk/models/shared/createchatcompletionrequest.ts | 3 +++ src/sdk/sdk.ts | 4 ++-- 12 files changed, 42 insertions(+), 24 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index e065c65..6e188a6 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -244,4 +244,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.51.1 (2.50.2) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.2.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.2.0 - . \ No newline at end of file +- [NPM v2.2.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.2.0 - . + +## 2023-06-29 01:18:17 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.51.3 (2.52.2) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.3.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.3.0 - . 
\ No newline at end of file diff --git a/docs/models/shared/chatcompletionfunctions.md b/docs/models/shared/chatcompletionfunctions.md index 6902cc1..ccdcfc0 100755 --- a/docs/models/shared/chatcompletionfunctions.md +++ b/docs/models/shared/chatcompletionfunctions.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `description` | *string* | :heavy_minus_sign: | The description of what the function does. | -| `name` | *string* | :heavy_check_mark: | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. | -| `parameters` | Record | :heavy_minus_sign: | The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. 
| \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `description` | *string* | :heavy_minus_sign: | A description of what the function does, used by the model to choose when and how to call the function. | +| `name` | *string* | :heavy_check_mark: | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
| +| `parameters` | Record | :heavy_check_mark: | The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.

To describe a function that accepts no parameters, provide the value `{"type": "object", "properties": {}}`. | \ No newline at end of file diff --git a/docs/models/shared/chatcompletionrequestmessage.md b/docs/models/shared/chatcompletionrequestmessage.md index 35ef172..5e4eb36 100755 --- a/docs/models/shared/chatcompletionrequestmessage.md +++ b/docs/models/shared/chatcompletionrequestmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `content` | *string* | :heavy_minus_sign: | The contents of the message. `content` is required for all messages except assistant messages with function calls. | +| `content` | *string* | :heavy_check_mark: | The contents of the message. `content` is required for all messages, and may be null for assistant messages with function calls. 
| | `functionCall` | [ChatCompletionRequestMessageFunctionCall](../../models/shared/chatcompletionrequestmessagefunctioncall.md) | :heavy_minus_sign: | The name and arguments of a function that should be called, as generated by the model. | | `name` | *string* | :heavy_minus_sign: | The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. | | `role` | [ChatCompletionRequestMessageRole](../../models/shared/chatcompletionrequestmessagerole.md) | :heavy_check_mark: | The role of the messages author. One of `system`, `user`, `assistant`, or `function`. | \ No newline at end of file diff --git a/docs/models/shared/chatcompletionrequestmessagefunctioncall.md b/docs/models/shared/chatcompletionrequestmessagefunctioncall.md index bf1c936..7908f85 100755 --- a/docs/models/shared/chatcompletionrequestmessagefunctioncall.md +++ b/docs/models/shared/chatcompletionrequestmessagefunctioncall.md @@ -7,5 +7,5 @@ The name and arguments of a function that should be called, as generated by the | Field | Type | Required | Description | | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `arguments` | *string* | :heavy_minus_sign: | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | -| `name` | *string* | :heavy_minus_sign: | The name of the function to call. | \ No newline at end of file +| `arguments` | *string* | :heavy_check_mark: | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | +| `name` | *string* | :heavy_check_mark: | The name of the function to call. | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequestmodel2.md b/docs/models/shared/createchatcompletionrequestmodel2.md index ba2001a..20cd4e4 100755 --- a/docs/models/shared/createchatcompletionrequestmodel2.md +++ b/docs/models/shared/createchatcompletionrequestmodel2.md @@ -8,10 +8,13 @@ ID of the model to use. 
See the [model endpoint compatibility](/docs/models/mode | Name | Value | | ---------------------- | ---------------------- | | `Gpt4` | gpt-4 | +| `Gpt40314` | gpt-4-0314 | | `Gpt40613` | gpt-4-0613 | | `Gpt432k` | gpt-4-32k | +| `Gpt432k0314` | gpt-4-32k-0314 | | `Gpt432k0613` | gpt-4-32k-0613 | | `Gpt35Turbo` | gpt-3.5-turbo | | `Gpt35Turbo16k` | gpt-3.5-turbo-16k | +| `Gpt35Turbo0301` | gpt-3.5-turbo-0301 | | `Gpt35Turbo0613` | gpt-3.5-turbo-0613 | | `Gpt35Turbo16k0613` | gpt-3.5-turbo-16k-0613 | \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 0930064..ea71abd 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,16 +1,16 @@ configVersion: 1.0.0 management: - docChecksum: 905cd76122997f2d869255c564530a31 + docChecksum: 6d34df9483cd54fc1d57e4ecc220dcf2 docVersion: 2.0.0 - speakeasyVersion: 1.51.1 - generationVersion: 2.50.2 + speakeasyVersion: 1.51.3 + generationVersion: 2.52.2 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.2.0 + version: 2.3.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 9b54227..0d36564 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.2.0", + "version": "2.3.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.2.0", + "version": "2.3.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 369a22f..139d6e5 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.2.0", + "version": "2.3.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/chatcompletionfunctions.ts b/src/sdk/models/shared/chatcompletionfunctions.ts index 635426f..1d07e95 100755 --- 
a/src/sdk/models/shared/chatcompletionfunctions.ts +++ b/src/sdk/models/shared/chatcompletionfunctions.ts @@ -7,7 +7,7 @@ import { Expose } from "class-transformer"; export class ChatCompletionFunctions extends SpeakeasyBase { /** - * The description of what the function does. + * A description of what the function does, used by the model to choose when and how to call the function. */ @SpeakeasyMetadata() @Expose({ name: "description" }) @@ -22,8 +22,12 @@ export class ChatCompletionFunctions extends SpeakeasyBase { /** * The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + * + * @remarks + * + * To describe a function that accepts no parameters, provide the value `{"type": "object", "properties": {}}`. */ @SpeakeasyMetadata() @Expose({ name: "parameters" }) - parameters?: Record; + parameters: Record; } diff --git a/src/sdk/models/shared/chatcompletionrequestmessage.ts b/src/sdk/models/shared/chatcompletionrequestmessage.ts index 16ebe4c..7118e65 100755 --- a/src/sdk/models/shared/chatcompletionrequestmessage.ts +++ b/src/sdk/models/shared/chatcompletionrequestmessage.ts @@ -14,14 +14,14 @@ export class ChatCompletionRequestMessageFunctionCall extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "arguments" }) - arguments?: string; + arguments: string; /** * The name of the function to call. */ @SpeakeasyMetadata() @Expose({ name: "name" }) - name?: string; + name: string; } /** @@ -36,11 +36,11 @@ export enum ChatCompletionRequestMessageRole { export class ChatCompletionRequestMessage extends SpeakeasyBase { /** - * The contents of the message. `content` is required for all messages except assistant messages with function calls. + * The contents of the message. 
`content` is required for all messages, and may be null for assistant messages with function calls. */ @SpeakeasyMetadata() @Expose({ name: "content" }) - content?: string; + content: string; /** * The name and arguments of a function that should be called, as generated by the model. diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts index b93376b..7301a52 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -42,11 +42,14 @@ export class CreateChatCompletionRequestLogitBias extends SpeakeasyBase {} */ export enum CreateChatCompletionRequestModel2 { Gpt4 = "gpt-4", + Gpt40314 = "gpt-4-0314", Gpt40613 = "gpt-4-0613", Gpt432k = "gpt-4-32k", + Gpt432k0314 = "gpt-4-32k-0314", Gpt432k0613 = "gpt-4-32k-0613", Gpt35Turbo = "gpt-3.5-turbo", Gpt35Turbo16k = "gpt-3.5-turbo-16k", + Gpt35Turbo0301 = "gpt-3.5-turbo-0301", Gpt35Turbo0613 = "gpt-3.5-turbo-0613", Gpt35Turbo16k0613 = "gpt-3.5-turbo-16k-0613", } diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index ada4e17..5a755ef 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.2.0"; - genVersion = "2.50.2"; + sdkVersion = "2.3.0"; + genVersion = "2.52.2"; public constructor(init?: Partial) { Object.assign(this, init); From e707a98ac5ddd1abc557db8b163fdbff97d4920d Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 1 Jul 2023 01:26:10 +0000 Subject: [PATCH 14/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.52.0 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/internal/utils/queryparams.ts | 26 +++++++++++++++++--------- src/sdk/sdk.ts | 4 ++-- 6 files changed, 34 insertions(+), 18 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 6e188a6..041b14c 100644 --- a/RELEASES.md +++ 
b/RELEASES.md @@ -252,4 +252,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.51.3 (2.52.2) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.3.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.3.0 - . \ No newline at end of file +- [NPM v2.3.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.3.0 - . + +## 2023-07-01 01:25:51 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.52.0 (2.55.0) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.4.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.4.0 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index ea71abd..eb9a18a 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 6d34df9483cd54fc1d57e4ecc220dcf2 docVersion: 2.0.0 - speakeasyVersion: 1.51.3 - generationVersion: 2.52.2 + speakeasyVersion: 1.52.0 + generationVersion: 2.55.0 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.3.0 + version: 2.4.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 0d36564..369de48 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.3.0", + "version": "2.4.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.3.0", + "version": "2.4.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 139d6e5..29268d0 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.3.0", + "version": "2.4.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc 
--build" diff --git a/src/internal/utils/queryparams.ts b/src/internal/utils/queryparams.ts index cd39120..90f5653 100755 --- a/src/internal/utils/queryparams.ts +++ b/src/internal/utils/queryparams.ts @@ -2,16 +2,24 @@ * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. */ -import {ParamDecorator, parseParamDecorator, populateFromGlobals, shouldQueryParamSerialize, valToString} from "./utils"; +import { + ParamDecorator, + parseParamDecorator, + populateFromGlobals, + shouldQueryParamSerialize, + valToString +} from "./utils"; import {requestMetadataKey} from "./requestbody"; export const qpMetadataKey = "queryParam"; const queryStringPrefix = "?"; +const filterAndJoin = (strings: string[]):string => strings.filter(s => !!s).join("&") + export function serializeQueryParams(queryParams: any, globals?: any): string { const queryStringParts: string[] = []; - if (!queryParams) return queryStringParts.join("&"); + if (!queryParams) return filterAndJoin(queryStringParts); const fieldNames: string[] = "__props__" in queryParams @@ -84,7 +92,7 @@ export function serializeQueryParams(queryParams: any, globals?: any): string { } } }); - return queryStringPrefix + queryStringParts.join("&"); + return queryStringPrefix + filterAndJoin(queryStringParts); } // TODO: Add support for disabling percent encoding for reserved characters @@ -94,7 +102,7 @@ function jsonSerializer(params: Record): string { Object.entries(Object.assign({}, params)).forEach(([key, value]) => { query.push(`${key}=${encodeURIComponent(JSON.stringify(value))}`); }); - return query.join("&"); + return filterAndJoin(query); } // TODO: Add support for disabling percent encoding for reserved characters @@ -133,7 +141,7 @@ function noExplodeSerializer(params: Record, delimiter = ","): stri query.push(`${key}=${encodeURIComponent(values)}`); } }); - return query.join("&"); + return filterAndJoin(query); } // TODO: Add support for disabling percent encoding for reserved characters @@ -146,9 
+154,9 @@ function formSerializerExplode(params: Record): string { query.push(`${key}=${encodeURIComponent(value)}`); else if (Array.isArray(value)) { query.push( - value + value .map((aValue) => `${key}=${encodeURIComponent(valToString(aValue))}`) - .join("&") + .join("&") ); } else query.push( @@ -176,7 +184,7 @@ function formSerializerExplode(params: Record): string { .join("&") ); }); - return query.join("&"); + return filterAndJoin(query); } // TODO: Add support for disabling percent encoding for reserved characters @@ -232,5 +240,5 @@ function deepObjectSerializer(params: Record): string { .join("&") ); }); - return query.join("&"); + return filterAndJoin(query); } diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 5a755ef..6966cf1 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.3.0"; - genVersion = "2.52.2"; + sdkVersion = "2.4.0"; + genVersion = "2.55.0"; public constructor(init?: Partial) { Object.assign(this, init); From c2f979070cb2507b802ec436458d205c73004fea Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 4 Jul 2023 01:20:27 +0000 Subject: [PATCH 15/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.52.0 --- RELEASES.md | 10 +- .../shared/createchatcompletionrequest.md | 2 +- .../createchatcompletionrequestlogitbias.md | 12 -- docs/models/shared/createcompletionrequest.md | 2 +- .../createcompletionrequestlogitbias.md | 14 -- ...createcompletionresponsechoiceslogprobs.md | 12 +- ...etionresponsechoiceslogprobstoplogprobs.md | 7 - .../createeditresponsechoiceslogprobs.md | 12 +- ...eeditresponsechoiceslogprobstoplogprobs.md | 7 - docs/models/shared/finetunehyperparams.md | 11 +- docs/models/shared/openaifile.md | 20 +-- docs/models/shared/openaifilestatusdetails.md | 7 - docs/sdks/openai/README.md | 161 +++++++++--------- files.gen | 5 - gen.yaml | 4 +- package-lock.json | 4 +- package.json | 2 +- 
.../shared/createchatcompletionrequest.ts | 13 +- .../models/shared/createcompletionrequest.ts | 17 +- .../models/shared/createcompletionresponse.ts | 7 +- src/sdk/models/shared/createeditresponse.ts | 7 +- src/sdk/models/shared/finetune.ts | 30 +++- src/sdk/models/shared/openaifile.ts | 7 +- src/sdk/sdk.ts | 2 +- 24 files changed, 165 insertions(+), 210 deletions(-) delete mode 100755 docs/models/shared/createchatcompletionrequestlogitbias.md delete mode 100755 docs/models/shared/createcompletionrequestlogitbias.md delete mode 100755 docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md delete mode 100755 docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md delete mode 100755 docs/models/shared/openaifilestatusdetails.md diff --git a/RELEASES.md b/RELEASES.md index 041b14c..d9b5cc2 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -260,4 +260,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.52.0 (2.55.0) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.4.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.4.0 - . \ No newline at end of file +- [NPM v2.4.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.4.0 - . + +## 2023-07-04 01:20:06 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.52.0 (2.55.0) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.4.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.4.1 - . 
\ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequest.md b/docs/models/shared/createchatcompletionrequest.md index f6ac246..4e14305 100755 --- a/docs/models/shared/createchatcompletionrequest.md +++ b/docs/models/shared/createchatcompletionrequest.md @@ -8,7 +8,7 @@ | `frequencyPenalty` | *number* | :heavy_minus_sign: | completions_frequency_penalty_description | | | `functionCall` | *any* | :heavy_minus_sign: | Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. | | | `functions` | [ChatCompletionFunctions](../../models/shared/chatcompletionfunctions.md)[] | :heavy_minus_sign: | A list of functions the model may generate JSON inputs for. | | -| `logitBias` | [CreateChatCompletionRequestLogitBias](../../models/shared/createchatcompletionrequestlogitbias.md) | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
| | +| `logitBias` | Record | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
| | | `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the chat completion.

The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| | | `messages` | [ChatCompletionRequestMessage](../../models/shared/chatcompletionrequestmessage.md)[] | :heavy_check_mark: | A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | | | `model` | *any* | :heavy_check_mark: | ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. | | diff --git a/docs/models/shared/createchatcompletionrequestlogitbias.md b/docs/models/shared/createchatcompletionrequestlogitbias.md deleted file mode 100755 index 6ad8754..0000000 --- a/docs/models/shared/createchatcompletionrequestlogitbias.md +++ /dev/null @@ -1,12 +0,0 @@ -# CreateChatCompletionRequestLogitBias - -Modify the likelihood of specified tokens appearing in the completion. - -Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - - - -## Fields - -| Field | Type | Required | Description | -| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/shared/createcompletionrequest.md b/docs/models/shared/createcompletionrequest.md index d33ed0f..0fb659a 100755 --- a/docs/models/shared/createcompletionrequest.md +++ b/docs/models/shared/createcompletionrequest.md @@ -8,7 +8,7 @@ | `bestOf` | *number* | :heavy_minus_sign: | Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.

When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.

**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
| | | `echo` | *boolean* | :heavy_minus_sign: | Echo back the prompt in addition to the completion
| | | `frequencyPenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | -| `logitBias` | [CreateCompletionRequestLogitBias](../../models/shared/createcompletionrequestlogitbias.md) | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.

As an example, you can pass `{"50256": -100}` to prevent the <\|endoftext\|> token from being generated.
| | +| `logitBias` | Record | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.

As an example, you can pass `{"50256": -100}` to prevent the <\|endoftext\|> token from being generated.
| | | `logprobs` | *number* | :heavy_minus_sign: | Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5.
| | | `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the completion.

The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| 16 | | `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| | diff --git a/docs/models/shared/createcompletionrequestlogitbias.md b/docs/models/shared/createcompletionrequestlogitbias.md deleted file mode 100755 index f69d6bf..0000000 --- a/docs/models/shared/createcompletionrequestlogitbias.md +++ /dev/null @@ -1,14 +0,0 @@ -# CreateCompletionRequestLogitBias - -Modify the likelihood of specified tokens appearing in the completion. - -Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - -As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. 
- - - -## Fields - -| Field | Type | Required | Description | -| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoiceslogprobs.md b/docs/models/shared/createcompletionresponsechoiceslogprobs.md index c9f707b..4aaffe6 100755 --- a/docs/models/shared/createcompletionresponsechoiceslogprobs.md +++ b/docs/models/shared/createcompletionresponsechoiceslogprobs.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| --------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | -| `textOffset` | *number*[] | :heavy_minus_sign: | N/A | -| `tokenLogprobs` | *number*[] | :heavy_minus_sign: | N/A | -| `tokens` | *string*[] | :heavy_minus_sign: | N/A | -| `topLogprobs` | [CreateCompletionResponseChoicesLogprobsTopLogprobs](../../models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md)[] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------- | -------------------------- | -------------------------- | -------------------------- | +| `textOffset` | *number*[] | :heavy_minus_sign: | N/A | +| `tokenLogprobs` | *number*[] | :heavy_minus_sign: | N/A | +| `tokens` | *string*[] | :heavy_minus_sign: | N/A | +| `topLogprobs` | Record[] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md 
b/docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md deleted file mode 100755 index 7d341de..0000000 --- a/docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md +++ /dev/null @@ -1,7 +0,0 @@ -# CreateCompletionResponseChoicesLogprobsTopLogprobs - - -## Fields - -| Field | Type | Required | Description | -| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoiceslogprobs.md b/docs/models/shared/createeditresponsechoiceslogprobs.md index a727a71..e7b154c 100755 --- a/docs/models/shared/createeditresponsechoiceslogprobs.md +++ b/docs/models/shared/createeditresponsechoiceslogprobs.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | -| `textOffset` | *number*[] | :heavy_minus_sign: | N/A | -| `tokenLogprobs` | *number*[] | :heavy_minus_sign: | N/A | -| `tokens` | *string*[] | :heavy_minus_sign: | N/A | -| `topLogprobs` | [CreateEditResponseChoicesLogprobsTopLogprobs](../../models/shared/createeditresponsechoiceslogprobstoplogprobs.md)[] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------- | -------------------------- | -------------------------- | -------------------------- | +| `textOffset` | *number*[] | :heavy_minus_sign: | N/A | +| `tokenLogprobs` | *number*[] | :heavy_minus_sign: | N/A | +| `tokens` | *string*[] | :heavy_minus_sign: | N/A | +| `topLogprobs` | Record[] | 
:heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md b/docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md deleted file mode 100755 index a81f3c2..0000000 --- a/docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md +++ /dev/null @@ -1,7 +0,0 @@ -# CreateEditResponseChoicesLogprobsTopLogprobs - - -## Fields - -| Field | Type | Required | Description | -| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/shared/finetunehyperparams.md b/docs/models/shared/finetunehyperparams.md index 09a15c4..c3c6f67 100755 --- a/docs/models/shared/finetunehyperparams.md +++ b/docs/models/shared/finetunehyperparams.md @@ -3,5 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `batchSize` | *number* | :heavy_check_mark: | N/A | +| `classificationNClasses` | *number* | :heavy_minus_sign: | N/A | +| `classificationPositiveClass` | *string* | :heavy_minus_sign: | N/A | +| `computeClassificationMetrics` | *boolean* | :heavy_minus_sign: | N/A | +| `learningRateMultiplier` | *number* | :heavy_check_mark: | N/A | +| `nEpochs` | *number* | :heavy_check_mark: | N/A | +| `promptLossWeight` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/openaifile.md b/docs/models/shared/openaifile.md index affa24a..4cb9697 100755 --- a/docs/models/shared/openaifile.md +++ b/docs/models/shared/openaifile.md @@ -5,13 +5,13 @@ OK ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------- | 
------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | -| `bytes` | *number* | :heavy_check_mark: | N/A | -| `createdAt` | *number* | :heavy_check_mark: | N/A | -| `filename` | *string* | :heavy_check_mark: | N/A | -| `id` | *string* | :heavy_check_mark: | N/A | -| `object` | *string* | :heavy_check_mark: | N/A | -| `purpose` | *string* | :heavy_check_mark: | N/A | -| `status` | *string* | :heavy_minus_sign: | N/A | -| `statusDetails` | [OpenAIFileStatusDetails](../../models/shared/openaifilestatusdetails.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `bytes` | *number* | :heavy_check_mark: | N/A | +| `createdAt` | *number* | :heavy_check_mark: | N/A | +| `filename` | *string* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | +| `purpose` | *string* | :heavy_check_mark: | N/A | +| `status` | *string* | :heavy_minus_sign: | N/A | +| `statusDetails` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/openaifilestatusdetails.md b/docs/models/shared/openaifilestatusdetails.md deleted file mode 100755 index adf4a8d..0000000 --- a/docs/models/shared/openaifilestatusdetails.md +++ /dev/null @@ -1,7 +0,0 @@ -# OpenAIFileStatusDetails - - -## Fields - -| Field | Type | Required | Description | -| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index 96ef7ef..917f922 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -140,36 +140,35 @@ sdk.openAI.createChatCompletion({ }, }, ], - logitBias: {}, - maxTokens: 
736918, + logitBias: { + "esse": 216550, + "excepturi": 135218, + "perferendis": 324141, + }, + maxTokens: 617636, messages: [ { - content: "ipsum", + content: "iste", functionCall: { - arguments: "excepturi", - name: "Dorothy Hane", + arguments: "dolor", + name: "Lester Welch", }, - name: "Curtis Morissette", - role: ChatCompletionRequestMessageRole.Function, - }, - { - content: "fuga", - functionCall: { - arguments: "in", - name: "Sheryl Kertzmann", - }, - name: "Brenda Wisozk", + name: "Stacy Moore", role: ChatCompletionRequestMessageRole.Assistant, }, ], model: "gpt-3.5-turbo", n: 1, - presencePenalty: 2103.82, - stop: "explicabo", + presencePenalty: 602.25, + stop: [ + "mollitia", + "laborum", + "dolores", + ], stream: false, temperature: 1, topP: 1, - user: "nobis", + user: "dolorem", }).then((res: CreateChatCompletionResponse) => { if (res.statusCode == 200) { // handle response @@ -204,25 +203,23 @@ import { CreateCompletionRequestModel2, CreateCompletionResponseChoicesFinishRea const sdk = new Gpt(); sdk.openAI.createCompletion({ - bestOf: 315428, + bestOf: 358152, echo: false, - frequencyPenalty: 6078.31, - logitBias: {}, - logprobs: 363711, + frequencyPenalty: 1289.26, + logitBias: { + "enim": 607831, + "nemo": 325047, + "excepturi": 38425, + "iure": 634274, + }, + logprobs: 988374, maxTokens: 16, - model: "excepturi", + model: CreateCompletionRequestModel2.TextDavinci003, n: 1, - presencePenalty: 384.25, - prompt: [ - "This is a test.", - "This is a test.", - "This is a test.", - ], + presencePenalty: 6527.9, + prompt: "This is a test.", stop: [ "["\n"]", - "["\n"]", - "["\n"]", - "["\n"]", ], stream: false, suffix: "test.", @@ -265,7 +262,7 @@ const sdk = new Gpt(); sdk.openAI.createEdit({ input: "What day of the wek is it?", instruction: "Fix the spelling mistakes.", - model: "text-davinci-edit-001", + model: CreateEditRequestModel2.TextDavinciEdit001, n: 1, temperature: 1, topP: 1, @@ -304,10 +301,12 @@ const sdk = new Gpt(); 
sdk.openAI.createEmbedding({ input: [ - 635059, + 253291, + 414369, + 466311, ], model: "text-embedding-ada-002", - user: "repellat", + user: "velit", }).then((res: CreateEmbeddingResponse) => { if (res.statusCode == 200) { // handle response @@ -343,10 +342,10 @@ const sdk = new Gpt(); sdk.openAI.createFile({ file: { - content: "mollitia".encode(), - file: "occaecati", + content: "error".encode(), + file: "quia", }, - purpose: "numquam", + purpose: "quis", }).then((res: CreateFileResponse) => { if (res.statusCode == 200) { // handle response @@ -386,19 +385,20 @@ import { CreateFineTuneRequestModel2 } from "@speakeasy-api/openai/dist/sdk/mode const sdk = new Gpt(); sdk.openAI.createFineTune({ - batchSize: 414369, + batchSize: 110375, classificationBetas: [ - 4746.97, - 2444.25, + 6563.3, + 3172.02, + 1381.83, ], - classificationNClasses: 623510, - classificationPositiveClass: "quia", + classificationNClasses: 778346, + classificationPositiveClass: "sequi", computeClassificationMetrics: false, - learningRateMultiplier: 3380.07, + learningRateMultiplier: 9495.72, model: "curie", - nEpochs: 674752, - promptLossWeight: 6563.3, - suffix: "enim", + nEpochs: 662527, + promptLossWeight: 8209.94, + suffix: "aut", trainingFile: "file-ajSREls59WBbvgSzJSVWxMCB", validationFile: "file-XjSREls59WBbvgSzJSVWxMCa", }).then((res: CreateFineTuneResponse) => { @@ -439,7 +439,7 @@ sdk.openAI.createImage({ prompt: "A cute baby sea otter", responseFormat: CreateImageRequestResponseFormat.Url, size: CreateImageRequestSize.OneThousandAndTwentyFourx1024, - user: "odit", + user: "quasi", }).then((res: CreateImageResponse) => { if (res.statusCode == 200) { // handle response @@ -474,18 +474,18 @@ const sdk = new Gpt(); sdk.openAI.createImageEdit({ image: { - content: "quo".encode(), - image: "sequi", + content: "error".encode(), + image: "temporibus", }, mask: { - content: "tenetur".encode(), - mask: "ipsam", + content: "laborum".encode(), + mask: "quasi", }, - n: "id", + n: "reiciendis", 
prompt: "A cute baby sea otter wearing a beret", - responseFormat: "possimus", - size: "aut", - user: "quasi", + responseFormat: "voluptatibus", + size: "vero", + user: "nihil", }).then((res: CreateImageEditResponse) => { if (res.statusCode == 200) { // handle response @@ -520,13 +520,13 @@ const sdk = new Gpt(); sdk.openAI.createImageVariation({ image: { - content: "error".encode(), - image: "temporibus", + content: "praesentium".encode(), + image: "voluptatibus", }, - n: "laborum", - responseFormat: "quasi", - size: "reiciendis", - user: "voluptatibus", + n: "ipsa", + responseFormat: "omnis", + size: "voluptate", + user: "cum", }).then((res: CreateImageVariationResponse) => { if (res.statusCode == 200) { // handle response @@ -561,11 +561,8 @@ import { CreateModerationRequestModel2 } from "@speakeasy-api/openai/dist/sdk/mo const sdk = new Gpt(); sdk.openAI.createModeration({ - input: [ - "I want to kill them.", - "I want to kill them.", - ], - model: CreateModerationRequestModel2.TextModerationStable, + input: "I want to kill them.", + model: "text-moderation-stable", }).then((res: CreateModerationResponse) => { if (res.statusCode == 200) { // handle response @@ -601,14 +598,14 @@ const sdk = new Gpt(); sdk.openAI.createTranscription({ file: { - content: "voluptatibus".encode(), - file: "ipsa", + content: "reprehenderit".encode(), + file: "ut", }, - language: "omnis", + language: "maiores", model: "whisper-1", - prompt: "cum", - responseFormat: "perferendis", - temperature: 391.87, + prompt: "corporis", + responseFormat: "dolore", + temperature: 4808.94, }).then((res: CreateTranscriptionResponse) => { if (res.statusCode == 200) { // handle response @@ -644,13 +641,13 @@ const sdk = new Gpt(); sdk.openAI.createTranslation({ file: { - content: "reprehenderit".encode(), - file: "ut", + content: "dicta".encode(), + file: "harum", }, - model: CreateTranslationRequestModel2.Whisper1, - prompt: "dicta", - responseFormat: "corporis", - temperature: 2961.4, + model: 
"whisper-1", + prompt: "accusamus", + responseFormat: "commodi", + temperature: 9182.36, }).then((res: CreateTranslationResponse) => { if (res.statusCode == 200) { // handle response @@ -684,7 +681,7 @@ import { DeleteFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operat const sdk = new Gpt(); sdk.openAI.deleteFile({ - fileId: "iusto", + fileId: "quae", }).then((res: DeleteFileResponse) => { if (res.statusCode == 200) { // handle response @@ -752,7 +749,7 @@ import { DownloadFileResponse } from "@speakeasy-api/openai/dist/sdk/models/oper const sdk = new Gpt(); sdk.openAI.downloadFile({ - fileId: "dicta", + fileId: "ipsum", }).then((res: DownloadFileResponse) => { if (res.statusCode == 200) { // handle response @@ -916,7 +913,7 @@ import { RetrieveFileResponse } from "@speakeasy-api/openai/dist/sdk/models/oper const sdk = new Gpt(); sdk.openAI.retrieveFile({ - fileId: "harum", + fileId: "quidem", }).then((res: RetrieveFileResponse) => { if (res.statusCode == 200) { // handle response diff --git a/files.gen b/files.gen index e619681..881278a 100755 --- a/files.gen +++ b/files.gen @@ -112,7 +112,6 @@ docs/models/operations/retrievemodelrequest.md docs/models/operations/retrievemodelresponse.md docs/models/shared/finetunehyperparams.md docs/models/shared/finetune.md -docs/models/shared/openaifilestatusdetails.md docs/models/shared/openaifile.md docs/models/shared/finetuneevent.md docs/models/shared/createchatcompletionresponsechoicesfinishreason.md @@ -124,7 +123,6 @@ docs/models/shared/chatcompletionresponsemessagerole.md docs/models/shared/chatcompletionresponsemessage.md docs/models/shared/createchatcompletionrequestfunctioncall2.md docs/models/shared/createchatcompletionrequestfunctioncall1.md -docs/models/shared/createchatcompletionrequestlogitbias.md docs/models/shared/createchatcompletionrequestmodel2.md docs/models/shared/createchatcompletionrequest.md docs/models/shared/chatcompletionrequestmessagefunctioncall.md @@ -132,16 +130,13 @@ 
docs/models/shared/chatcompletionrequestmessagerole.md docs/models/shared/chatcompletionrequestmessage.md docs/models/shared/chatcompletionfunctions.md docs/models/shared/createcompletionresponsechoicesfinishreason.md -docs/models/shared/createcompletionresponsechoiceslogprobstoplogprobs.md docs/models/shared/createcompletionresponsechoiceslogprobs.md docs/models/shared/createcompletionresponsechoices.md docs/models/shared/createcompletionresponseusage.md docs/models/shared/createcompletionresponse.md -docs/models/shared/createcompletionrequestlogitbias.md docs/models/shared/createcompletionrequestmodel2.md docs/models/shared/createcompletionrequest.md docs/models/shared/createeditresponsechoicesfinishreason.md -docs/models/shared/createeditresponsechoiceslogprobstoplogprobs.md docs/models/shared/createeditresponsechoiceslogprobs.md docs/models/shared/createeditresponsechoices.md docs/models/shared/createeditresponseusage.md diff --git a/gen.yaml b/gen.yaml index eb9a18a..be93b41 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,6 +1,6 @@ configVersion: 1.0.0 management: - docChecksum: 6d34df9483cd54fc1d57e4ecc220dcf2 + docChecksum: 13e9399c8d0f4df4990f9c1ff3b98ef2 docVersion: 2.0.0 speakeasyVersion: 1.52.0 generationVersion: 2.55.0 @@ -10,7 +10,7 @@ generation: singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.4.0 + version: 2.4.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 369de48..0b40237 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.4.0", + "version": "2.4.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.4.0", + "version": "2.4.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 29268d0..d46caa6 100755 --- a/package.json +++ b/package.json @@ -1,6 
+1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.4.0", + "version": "2.4.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts index 7301a52..6ae2b5b 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -27,16 +27,6 @@ export enum CreateChatCompletionRequestFunctionCall1 { Auto = "auto", } -/** - * Modify the likelihood of specified tokens appearing in the completion. - * - * @remarks - * - * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - * - */ -export class CreateChatCompletionRequestLogitBias extends SpeakeasyBase {} - /** * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. */ @@ -87,8 +77,7 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "logit_bias" }) - @Type(() => CreateChatCompletionRequestLogitBias) - logitBias?: CreateChatCompletionRequestLogitBias; + logitBias?: Record; /** * The maximum number of [tokens](/tokenizer) to generate in the chat completion. 
diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts index 7bfa3bd..6cefcc7 100755 --- a/src/sdk/models/shared/createcompletionrequest.ts +++ b/src/sdk/models/shared/createcompletionrequest.ts @@ -3,19 +3,7 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose, Type } from "class-transformer"; - -/** - * Modify the likelihood of specified tokens appearing in the completion. - * - * @remarks - * - * Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - * - * As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. - * - */ -export class CreateCompletionRequestLogitBias extends SpeakeasyBase {} +import { Expose } from "class-transformer"; /** * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. @@ -82,8 +70,7 @@ export class CreateCompletionRequest extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "logit_bias" }) - @Type(() => CreateCompletionRequestLogitBias) - logitBias?: CreateCompletionRequestLogitBias; + logitBias?: Record; /** * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. diff --git a/src/sdk/models/shared/createcompletionresponse.ts b/src/sdk/models/shared/createcompletionresponse.ts index 5e0f34d..0894461 100755 --- a/src/sdk/models/shared/createcompletionresponse.ts +++ b/src/sdk/models/shared/createcompletionresponse.ts @@ -10,8 +10,6 @@ export enum CreateCompletionResponseChoicesFinishReason { Length = "length", } -export class CreateCompletionResponseChoicesLogprobsTopLogprobs extends SpeakeasyBase {} - export class CreateCompletionResponseChoicesLogprobs extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "text_offset" }) @@ -25,10 +23,9 @@ export class CreateCompletionResponseChoicesLogprobs extends SpeakeasyBase { @Expose({ name: "tokens" }) tokens?: string[]; - @SpeakeasyMetadata({ elemType: CreateCompletionResponseChoicesLogprobsTopLogprobs }) + @SpeakeasyMetadata() @Expose({ name: "top_logprobs" }) - @Type(() => CreateCompletionResponseChoicesLogprobsTopLogprobs) - topLogprobs?: CreateCompletionResponseChoicesLogprobsTopLogprobs[]; + topLogprobs?: Record[]; } export class CreateCompletionResponseChoices extends SpeakeasyBase { diff --git a/src/sdk/models/shared/createeditresponse.ts b/src/sdk/models/shared/createeditresponse.ts index 6a52f8e..4028cfc 100755 --- a/src/sdk/models/shared/createeditresponse.ts +++ b/src/sdk/models/shared/createeditresponse.ts @@ -10,8 +10,6 @@ export enum CreateEditResponseChoicesFinishReason { Length = "length", } -export class CreateEditResponseChoicesLogprobsTopLogprobs extends SpeakeasyBase {} - export class CreateEditResponseChoicesLogprobs extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "text_offset" }) @@ -25,10 +23,9 @@ export class CreateEditResponseChoicesLogprobs extends SpeakeasyBase { @Expose({ name: "tokens" }) tokens?: string[]; - @SpeakeasyMetadata({ elemType: CreateEditResponseChoicesLogprobsTopLogprobs }) + @SpeakeasyMetadata() 
@Expose({ name: "top_logprobs" }) - @Type(() => CreateEditResponseChoicesLogprobsTopLogprobs) - topLogprobs?: CreateEditResponseChoicesLogprobsTopLogprobs[]; + topLogprobs?: Record[]; } export class CreateEditResponseChoices extends SpeakeasyBase { diff --git a/src/sdk/models/shared/finetune.ts b/src/sdk/models/shared/finetune.ts index 6ab53ab..53a3b5c 100755 --- a/src/sdk/models/shared/finetune.ts +++ b/src/sdk/models/shared/finetune.ts @@ -7,7 +7,35 @@ import { FineTuneEvent } from "./finetuneevent"; import { OpenAIFile } from "./openaifile"; import { Expose, Type } from "class-transformer"; -export class FineTuneHyperparams extends SpeakeasyBase {} +export class FineTuneHyperparams extends SpeakeasyBase { + @SpeakeasyMetadata() + @Expose({ name: "batch_size" }) + batchSize: number; + + @SpeakeasyMetadata() + @Expose({ name: "classification_n_classes" }) + classificationNClasses?: number; + + @SpeakeasyMetadata() + @Expose({ name: "classification_positive_class" }) + classificationPositiveClass?: string; + + @SpeakeasyMetadata() + @Expose({ name: "compute_classification_metrics" }) + computeClassificationMetrics?: boolean; + + @SpeakeasyMetadata() + @Expose({ name: "learning_rate_multiplier" }) + learningRateMultiplier: number; + + @SpeakeasyMetadata() + @Expose({ name: "n_epochs" }) + nEpochs: number; + + @SpeakeasyMetadata() + @Expose({ name: "prompt_loss_weight" }) + promptLossWeight: number; +} /** * OK diff --git a/src/sdk/models/shared/openaifile.ts b/src/sdk/models/shared/openaifile.ts index 3c6dea6..8d5d59b 100755 --- a/src/sdk/models/shared/openaifile.ts +++ b/src/sdk/models/shared/openaifile.ts @@ -3,9 +3,7 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose, Type } from "class-transformer"; - -export class OpenAIFileStatusDetails extends SpeakeasyBase {} +import { Expose } from "class-transformer"; /** * OK @@ -41,6 +39,5 @@ export class OpenAIFile extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ 
name: "status_details" }) - @Type(() => OpenAIFileStatusDetails) - statusDetails?: OpenAIFileStatusDetails; + statusDetails?: string; } diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 6966cf1..b2c8e9b 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,7 +38,7 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.4.0"; + sdkVersion = "2.4.1"; genVersion = "2.55.0"; public constructor(init?: Partial) { From c89f5931da01ee53ef94433588e15b4f859c0790 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 6 Jul 2023 01:22:37 +0000 Subject: [PATCH 16/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.52.2 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/internal/utils/requestbody.ts | 6 ++++++ src/sdk/sdk.ts | 4 ++-- src/sdk/types/rfcdate.ts | 19 ++++++++++++++++--- 7 files changed, 39 insertions(+), 12 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index d9b5cc2..6c9fbe1 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -268,4 +268,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.52.0 (2.55.0) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.4.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.4.1 - . \ No newline at end of file +- [NPM v2.4.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.4.1 - . + +## 2023-07-06 01:22:17 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.52.2 (2.57.2) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.5.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.5.0 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index be93b41..c676ae4 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 13e9399c8d0f4df4990f9c1ff3b98ef2 docVersion: 2.0.0 - speakeasyVersion: 1.52.0 - generationVersion: 2.55.0 + speakeasyVersion: 1.52.2 + generationVersion: 2.57.2 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.4.1 + version: 2.5.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 0b40237..1234a69 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.4.1", + "version": "2.5.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.4.1", + "version": "2.5.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index d46caa6..d10f64f 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.4.1", + "version": "2.5.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/internal/utils/requestbody.ts b/src/internal/utils/requestbody.ts index 40f601b..734969f 100755 --- a/src/internal/utils/requestbody.ts +++ b/src/internal/utils/requestbody.ts @@ -6,6 +6,7 @@ import {isBooleanRecord, isNumberRecord, isStringRecord, SerializationMethodToCo import FormData from "form-data"; import {RFCDate} from "../../sdk/types"; +import {classToPlain} from "class-transformer"; export const requestMetadataKey = "request"; const mpFormMetadataKey = "multipart_form"; @@ -66,6 +67,11 @@ const serializeContentType = ( break; case "application/json": + [requestHeaders, requestBody] = [ + {"Content-Type": `${contentType}`}, + classToPlain(reqBody, {exposeUnsetFields: false}), + ]; + break; 
case "text/json": [requestHeaders, requestBody] = [ {"Content-Type": `${contentType}`}, diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index b2c8e9b..592b369 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.4.1"; - genVersion = "2.55.0"; + sdkVersion = "2.5.0"; + genVersion = "2.57.2"; public constructor(init?: Partial) { Object.assign(this, init); diff --git a/src/sdk/types/rfcdate.ts b/src/sdk/types/rfcdate.ts index 1a3d24b..903ac7d 100755 --- a/src/sdk/types/rfcdate.ts +++ b/src/sdk/types/rfcdate.ts @@ -5,11 +5,24 @@ export class RFCDate { private date: Date; - constructor(date?: Date | string) { + constructor(date: Date | {date:string} | string | undefined) { + if (!date) { + this.date = new Date(); + return; + } + if (typeof date === "string") { this.date = new Date(date); - } else { - this.date = date ?? new Date(); + return; + } + if (date instanceof Date) { + this.date = date as Date + return; + } + + const anyDate = (date as any); + if (date && !!anyDate.date) { + this.date = new Date(anyDate.date); } } From 50211b8c451fee1d8a8ffd7630e416fe2022a507 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 7 Jul 2023 01:22:29 +0000 Subject: [PATCH 17/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.53.0 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 6c9fbe1..ebdeac8 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -276,4 +276,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.52.2 (2.57.2) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.5.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.5.0 - . 
\ No newline at end of file +- [NPM v2.5.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.5.0 - . + +## 2023-07-07 01:22:06 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.53.0 (2.58.0) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.6.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.6.0 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index c676ae4..6426162 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 13e9399c8d0f4df4990f9c1ff3b98ef2 docVersion: 2.0.0 - speakeasyVersion: 1.52.2 - generationVersion: 2.57.2 + speakeasyVersion: 1.53.0 + generationVersion: 2.58.0 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.5.0 + version: 2.6.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 1234a69..508e0b4 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.5.0", + "version": "2.6.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.5.0", + "version": "2.6.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index d10f64f..8bb4ec4 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.5.0", + "version": "2.6.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 592b369..aba8d29 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.5.0"; - genVersion = "2.57.2"; + 
sdkVersion = "2.6.0"; + genVersion = "2.58.0"; public constructor(init?: Partial) { Object.assign(this, init); From 03557004ea2c27f11e1e4de661163d8b978cb960 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 8 Jul 2023 01:21:13 +0000 Subject: [PATCH 18/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.53.1 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index ebdeac8..d8cfdc2 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -284,4 +284,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.53.0 (2.58.0) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.6.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.6.0 - . \ No newline at end of file +- [NPM v2.6.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.6.0 - . + +## 2023-07-08 01:20:49 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.53.1 (2.58.2) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.6.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.6.1 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 6426162..153a5e0 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 13e9399c8d0f4df4990f9c1ff3b98ef2 docVersion: 2.0.0 - speakeasyVersion: 1.53.0 - generationVersion: 2.58.0 + speakeasyVersion: 1.53.1 + generationVersion: 2.58.2 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.6.0 + version: 2.6.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 508e0b4..53a00f0 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.6.0", + "version": "2.6.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.6.0", + "version": "2.6.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 8bb4ec4..b447b83 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.6.0", + "version": "2.6.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index aba8d29..9c1322a 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.6.0"; - genVersion = "2.58.0"; + sdkVersion = "2.6.1"; + genVersion = "2.58.2"; public constructor(init?: Partial) { Object.assign(this, init); From 897f8e32bcd9eb6f6c5770a513f1a3af355c470f Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sun, 9 Jul 2023 01:25:30 +0000 Subject: [PATCH 19/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.53.1 --- README.md | 2 +- RELEASES.md | 10 +++++++++- docs/sdks/openai/README.md | 6 ++++-- gen.yaml | 4 ++-- 
package-lock.json | 4 ++-- package.json | 2 +- src/sdk/openai.ts | 2 ++ src/sdk/sdk.ts | 2 +- 8 files changed, 22 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 9e9f3be..a26bd98 100755 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ sdk.openAI.cancelFineTune({ * [createChatCompletion](docs/sdks/openai/README.md#createchatcompletion) - Creates a model response for the given chat conversation. * [createCompletion](docs/sdks/openai/README.md#createcompletion) - Creates a completion for the provided prompt and parameters. -* [createEdit](docs/sdks/openai/README.md#createedit) - Creates a new edit for the provided input, instruction, and parameters. +* [~~createEdit~~](docs/sdks/openai/README.md#createedit) - Creates a new edit for the provided input, instruction, and parameters. :warning: **Deprecated** * [createEmbedding](docs/sdks/openai/README.md#createembedding) - Creates an embedding vector representing the input text. * [createFile](docs/sdks/openai/README.md#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. diff --git a/RELEASES.md b/RELEASES.md index d8cfdc2..b57e83b 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -292,4 +292,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.53.1 (2.58.2) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.6.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.6.1 - . \ No newline at end of file +- [NPM v2.6.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.6.1 - . 
+ +## 2023-07-09 01:25:10 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.53.1 (2.58.2) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.6.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.6.2 - . \ No newline at end of file diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index 917f922..d02d995 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -10,7 +10,7 @@ The OpenAI REST API * [createChatCompletion](#createchatcompletion) - Creates a model response for the given chat conversation. * [createCompletion](#createcompletion) - Creates a completion for the provided prompt and parameters. -* [createEdit](#createedit) - Creates a new edit for the provided input, instruction, and parameters. +* [~~createEdit~~](#createedit) - Creates a new edit for the provided input, instruction, and parameters. :warning: **Deprecated** * [createEmbedding](#createembedding) - Creates an embedding vector representing the input text. * [createFile](#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. @@ -246,10 +246,12 @@ sdk.openAI.createCompletion({ **Promise<[operations.CreateCompletionResponse](../../models/operations/createcompletionresponse.md)>** -## createEdit +## ~~createEdit~~ Creates a new edit for the provided input, instruction, and parameters. +> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. 
+ ### Example Usage ```typescript diff --git a/gen.yaml b/gen.yaml index 153a5e0..0133330 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,6 +1,6 @@ configVersion: 1.0.0 management: - docChecksum: 13e9399c8d0f4df4990f9c1ff3b98ef2 + docChecksum: 0bb43455052751ae97d57d099733aace docVersion: 2.0.0 speakeasyVersion: 1.53.1 generationVersion: 2.58.2 @@ -10,7 +10,7 @@ generation: singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.6.1 + version: 2.6.2 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 53a00f0..6960a41 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.6.1", + "version": "2.6.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.6.1", + "version": "2.6.2", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index b447b83..eb6107d 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.6.1", + "version": "2.6.2", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index 635bb08..adcbfed 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -225,6 +225,8 @@ export class OpenAI { /** * Creates a new edit for the provided input, instruction, and parameters. 
+ * + * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible */ async createEdit( req: shared.CreateEditRequest, diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 9c1322a..28b22b3 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,7 +38,7 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.6.1"; + sdkVersion = "2.6.2"; genVersion = "2.58.2"; public constructor(init?: Partial) { From f5a42c17153e03376a15c24d464d923b9ac94ae1 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 11 Jul 2023 01:13:20 +0000 Subject: [PATCH 20/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.56.0 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index b57e83b..d74350f 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -300,4 +300,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.53.1 (2.58.2) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.6.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.6.2 - . \ No newline at end of file +- [NPM v2.6.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.6.2 - . + +## 2023-07-11 01:13:01 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.56.0 (2.61.0) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.7.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.7.0 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 0133330..0af5026 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 0bb43455052751ae97d57d099733aace docVersion: 2.0.0 - speakeasyVersion: 1.53.1 - generationVersion: 2.58.2 + speakeasyVersion: 1.56.0 + generationVersion: 2.61.0 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.6.2 + version: 2.7.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 6960a41..0b1d567 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.6.2", + "version": "2.7.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.6.2", + "version": "2.7.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index eb6107d..e061a46 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.6.2", + "version": "2.7.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 28b22b3..170922a 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.6.2"; - genVersion = "2.58.2"; + sdkVersion = "2.7.0"; + genVersion = "2.61.0"; public constructor(init?: Partial) { Object.assign(this, init); From 6315098cb2c210e4d34f2449e14864d1a981b3b7 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 12 Jul 2023 01:19:07 +0000 Subject: [PATCH 21/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.56.4 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- 
src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index d74350f..3f56ca9 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -308,4 +308,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.56.0 (2.61.0) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.7.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.7.0 - . \ No newline at end of file +- [NPM v2.7.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.7.0 - . + +## 2023-07-12 01:18:46 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.56.4 (2.61.5) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.7.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.7.1 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 0af5026..d40fa30 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 0bb43455052751ae97d57d099733aace docVersion: 2.0.0 - speakeasyVersion: 1.56.0 - generationVersion: 2.61.0 + speakeasyVersion: 1.56.4 + generationVersion: 2.61.5 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.7.0 + version: 2.7.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 0b1d567..8db60b6 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.7.0", + "version": "2.7.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.7.0", + "version": "2.7.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index e061a46..c1aaf6e 100755 --- a/package.json +++ 
b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.7.0", + "version": "2.7.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 170922a..0577be9 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.7.0"; - genVersion = "2.61.0"; + sdkVersion = "2.7.1"; + genVersion = "2.61.5"; public constructor(init?: Partial) { Object.assign(this, init); From 6fe8bcdf1c272dcd1faa214c7c9553950d5d4260 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 13 Jul 2023 01:22:43 +0000 Subject: [PATCH 22/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.57.0 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 3f56ca9..850307e 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -316,4 +316,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.56.4 (2.61.5) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.7.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.7.1 - . \ No newline at end of file +- [NPM v2.7.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.7.1 - . + +## 2023-07-13 01:22:18 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.57.0 (2.62.1) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.8.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.8.0 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index d40fa30..b476a6a 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 0bb43455052751ae97d57d099733aace docVersion: 2.0.0 - speakeasyVersion: 1.56.4 - generationVersion: 2.61.5 + speakeasyVersion: 1.57.0 + generationVersion: 2.62.1 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.7.1 + version: 2.8.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 8db60b6..df579f5 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.7.1", + "version": "2.8.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.7.1", + "version": "2.8.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index c1aaf6e..2d29b76 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.7.1", + "version": "2.8.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 0577be9..e6ffb9a 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.7.1"; - genVersion = "2.61.5"; + sdkVersion = "2.8.0"; + genVersion = "2.62.1"; public constructor(init?: Partial) { Object.assign(this, init); From 901c5554c04b2577bb9e7a24e7c4160d6a0db5b3 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 14 Jul 2023 01:21:48 +0000 Subject: [PATCH 23/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.59.0 --- README.md | 2 ++ RELEASES.md | 10 ++++++- USAGE.md | 2 ++ .../createchatcompletionresponsechoices.md | 6 
++-- .../shared/createeditresponsechoices.md | 7 ++--- .../createeditresponsechoiceslogprobs.md | 11 ------- ...uest.md => createtranscriptionrequest1.md} | 4 +-- ...reatetranscriptionrequestresponseformat.md | 15 ++++++++++ docs/sdks/openai/README.md | 18 +++++------- files.gen | 6 ++-- gen.yaml | 8 ++--- package-lock.json | 4 +-- package.json | 2 +- .../shared/createchatcompletionresponse.ts | 6 ++-- src/sdk/models/shared/createeditresponse.ts | 29 ++----------------- ...uest.ts => createtranscriptionrequest1.ts} | 18 ++++++++++-- src/sdk/models/shared/index.ts | 2 +- src/sdk/openai.ts | 4 +-- src/sdk/sdk.ts | 4 +-- 19 files changed, 81 insertions(+), 77 deletions(-) delete mode 100755 docs/models/shared/createeditresponsechoiceslogprobs.md rename docs/models/shared/{createtranscriptionrequest.md => createtranscriptionrequest1.md} (98%) create mode 100755 docs/models/shared/createtranscriptionrequestresponseformat.md rename src/sdk/models/shared/{createtranscriptionrequest.ts => createtranscriptionrequest1.ts} (85%) diff --git a/README.md b/README.md index a26bd98..14eb585 100755 --- a/README.md +++ b/README.md @@ -39,6 +39,8 @@ Authorization: Bearer YOUR_API_KEY ## SDK Example Usage + + ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; diff --git a/RELEASES.md b/RELEASES.md index 850307e..e58d0f6 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -324,4 +324,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.57.0 (2.62.1) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.8.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.8.0 - . \ No newline at end of file +- [NPM v2.8.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.8.0 - . 
+ +## 2023-07-14 01:21:29 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.59.0 (2.65.0) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.9.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.9.0 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index 1eedb9f..c3e64a8 100755 --- a/USAGE.md +++ b/USAGE.md @@ -1,4 +1,6 @@ + + ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; diff --git a/docs/models/shared/createchatcompletionresponsechoices.md b/docs/models/shared/createchatcompletionresponsechoices.md index 0de5ed7..2f12b62 100755 --- a/docs/models/shared/createchatcompletionresponsechoices.md +++ b/docs/models/shared/createchatcompletionresponsechoices.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | -| `finishReason` | [CreateChatCompletionResponseChoicesFinishReason](../../models/shared/createchatcompletionresponsechoicesfinishreason.md) | :heavy_minus_sign: | N/A | -| `index` | *number* | :heavy_minus_sign: | N/A | -| `message` | [ChatCompletionResponseMessage](../../models/shared/chatcompletionresponsemessage.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `finishReason` | [CreateChatCompletionResponseChoicesFinishReason](../../models/shared/createchatcompletionresponsechoicesfinishreason.md) | :heavy_check_mark: | 
N/A | +| `index` | *number* | :heavy_check_mark: | N/A | +| `message` | [ChatCompletionResponseMessage](../../models/shared/chatcompletionresponsemessage.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoices.md b/docs/models/shared/createeditresponsechoices.md index 358667d..7f5e22a 100755 --- a/docs/models/shared/createeditresponsechoices.md +++ b/docs/models/shared/createeditresponsechoices.md @@ -5,7 +5,6 @@ | Field | Type | Required | Description | | ----------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | -| `finishReason` | [CreateEditResponseChoicesFinishReason](../../models/shared/createeditresponsechoicesfinishreason.md) | :heavy_minus_sign: | N/A | -| `index` | *number* | :heavy_minus_sign: | N/A | -| `logprobs` | [CreateEditResponseChoicesLogprobs](../../models/shared/createeditresponsechoiceslogprobs.md) | :heavy_minus_sign: | N/A | -| `text` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `finishReason` | [CreateEditResponseChoicesFinishReason](../../models/shared/createeditresponsechoicesfinishreason.md) | :heavy_check_mark: | N/A | +| `index` | *number* | :heavy_check_mark: | N/A | +| `text` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoiceslogprobs.md b/docs/models/shared/createeditresponsechoiceslogprobs.md deleted file mode 100755 index e7b154c..0000000 --- a/docs/models/shared/createeditresponsechoiceslogprobs.md +++ /dev/null @@ -1,11 +0,0 @@ -# CreateEditResponseChoicesLogprobs - - -## Fields - -| Field | Type | Required | 
Description | -| -------------------------- | -------------------------- | -------------------------- | -------------------------- | -| `textOffset` | *number*[] | :heavy_minus_sign: | N/A | -| `tokenLogprobs` | *number*[] | :heavy_minus_sign: | N/A | -| `tokens` | *string*[] | :heavy_minus_sign: | N/A | -| `topLogprobs` | Record[] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequest.md b/docs/models/shared/createtranscriptionrequest1.md similarity index 98% rename from docs/models/shared/createtranscriptionrequest.md rename to docs/models/shared/createtranscriptionrequest1.md index 3cea803..8c46750 100755 --- a/docs/models/shared/createtranscriptionrequest.md +++ b/docs/models/shared/createtranscriptionrequest1.md @@ -1,4 +1,4 @@ -# CreateTranscriptionRequest +# CreateTranscriptionRequest1 ## Fields @@ -9,5 +9,5 @@ | `language` | *string* | :heavy_minus_sign: | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
| | `model` | *any* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| | `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
| -| `responseFormat` | *string* | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| +| `responseFormat` | [CreateTranscriptionRequestResponseFormat](../../models/shared/createtranscriptionrequestresponseformat.md) | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| | `temperature` | *number* | :heavy_minus_sign: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
| \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequestresponseformat.md b/docs/models/shared/createtranscriptionrequestresponseformat.md new file mode 100755 index 0000000..13488d6 --- /dev/null +++ b/docs/models/shared/createtranscriptionrequestresponseformat.md @@ -0,0 +1,15 @@ +# CreateTranscriptionRequestResponseFormat + +The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `Json` | json | +| `Text` | text | +| `Srt` | srt | +| `VerboseJson` | verbose_json | +| `Vtt` | vtt | \ No newline at end of file diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index d02d995..42294b5 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -88,10 +88,8 @@ import { Gpt } from "@speakeasy-api/openai"; import { CreateChatCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { ChatCompletionRequestMessageRole, - ChatCompletionResponseMessageRole, CreateChatCompletionRequestFunctionCall1, CreateChatCompletionRequestModel2, - CreateChatCompletionResponseChoicesFinishReason, } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); @@ -198,7 +196,7 @@ Creates a completion for the provided prompt and parameters. ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateCompletionRequestModel2, CreateCompletionResponseChoicesFinishReason } from "@speakeasy-api/openai/dist/sdk/models/shared"; +import { CreateCompletionRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); @@ -257,7 +255,7 @@ Creates a new edit for the provided input, instruction, and parameters. 
```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateEditRequestModel2, CreateEditResponseChoicesFinishReason } from "@speakeasy-api/openai/dist/sdk/models/shared"; +import { CreateEditRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); @@ -594,7 +592,7 @@ Transcribes audio into the input language. ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateTranscriptionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateTranscriptionRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; +import { CreateTranscriptionRequestModel2, CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); @@ -606,7 +604,7 @@ sdk.openAI.createTranscription({ language: "maiores", model: "whisper-1", prompt: "corporis", - responseFormat: "dolore", + responseFormat: CreateTranscriptionRequestResponseFormat.Text, temperature: 4808.94, }).then((res: CreateTranscriptionResponse) => { if (res.statusCode == 200) { @@ -617,10 +615,10 @@ sdk.openAI.createTranscription({ ### Parameters -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `request` | [shared.CreateTranscriptionRequest](../../models/shared/createtranscriptionrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `request` | [shared.CreateTranscriptionRequest1](../../models/shared/createtranscriptionrequest1.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | ### Response diff --git a/files.gen b/files.gen index 881278a..68d7ee8 100755 --- a/files.gen +++ b/files.gen @@ -65,7 +65,7 @@ src/sdk/models/shared/createimagevariationrequest.ts src/sdk/models/shared/createmoderationresponse.ts src/sdk/models/shared/createmoderationrequest.ts src/sdk/models/shared/createtranscriptionresponse.ts -src/sdk/models/shared/createtranscriptionrequest.ts +src/sdk/models/shared/createtranscriptionrequest1.ts src/sdk/models/shared/createtranslationresponse.ts src/sdk/models/shared/createtranslationrequest.ts src/sdk/models/shared/deletefileresponse.ts @@ -137,7 +137,6 @@ docs/models/shared/createcompletionresponse.md docs/models/shared/createcompletionrequestmodel2.md docs/models/shared/createcompletionrequest.md docs/models/shared/createeditresponsechoicesfinishreason.md -docs/models/shared/createeditresponsechoiceslogprobs.md docs/models/shared/createeditresponsechoices.md docs/models/shared/createeditresponseusage.md docs/models/shared/createeditresponse.md @@ -171,7 +170,8 @@ docs/models/shared/createmoderationrequest.md docs/models/shared/createtranscriptionresponse.md docs/models/shared/createtranscriptionrequestfile.md docs/models/shared/createtranscriptionrequestmodel2.md 
-docs/models/shared/createtranscriptionrequest.md +docs/models/shared/createtranscriptionrequestresponseformat.md +docs/models/shared/createtranscriptionrequest1.md docs/models/shared/createtranslationresponse.md docs/models/shared/createtranslationrequestfile.md docs/models/shared/createtranslationrequestmodel2.md diff --git a/gen.yaml b/gen.yaml index b476a6a..fb1f1ae 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,16 +1,16 @@ configVersion: 1.0.0 management: - docChecksum: 0bb43455052751ae97d57d099733aace + docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.57.0 - generationVersion: 2.62.1 + speakeasyVersion: 1.59.0 + generationVersion: 2.65.0 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.8.0 + version: 2.9.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index df579f5..0f4d33b 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.8.0", + "version": "2.9.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.8.0", + "version": "2.9.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 2d29b76..5a29890 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.8.0", + "version": "2.9.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/createchatcompletionresponse.ts b/src/sdk/models/shared/createchatcompletionresponse.ts index 27dffc7..b57096f 100755 --- a/src/sdk/models/shared/createchatcompletionresponse.ts +++ b/src/sdk/models/shared/createchatcompletionresponse.ts @@ -15,16 +15,16 @@ export enum CreateChatCompletionResponseChoicesFinishReason { export class 
CreateChatCompletionResponseChoices extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "finish_reason" }) - finishReason?: CreateChatCompletionResponseChoicesFinishReason; + finishReason: CreateChatCompletionResponseChoicesFinishReason; @SpeakeasyMetadata() @Expose({ name: "index" }) - index?: number; + index: number; @SpeakeasyMetadata() @Expose({ name: "message" }) @Type(() => ChatCompletionResponseMessage) - message?: ChatCompletionResponseMessage; + message: ChatCompletionResponseMessage; } export class CreateChatCompletionResponseUsage extends SpeakeasyBase { diff --git a/src/sdk/models/shared/createeditresponse.ts b/src/sdk/models/shared/createeditresponse.ts index 4028cfc..936d3e7 100755 --- a/src/sdk/models/shared/createeditresponse.ts +++ b/src/sdk/models/shared/createeditresponse.ts @@ -10,41 +10,18 @@ export enum CreateEditResponseChoicesFinishReason { Length = "length", } -export class CreateEditResponseChoicesLogprobs extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "text_offset" }) - textOffset?: number[]; - - @SpeakeasyMetadata() - @Expose({ name: "token_logprobs" }) - tokenLogprobs?: number[]; - - @SpeakeasyMetadata() - @Expose({ name: "tokens" }) - tokens?: string[]; - - @SpeakeasyMetadata() - @Expose({ name: "top_logprobs" }) - topLogprobs?: Record[]; -} - export class CreateEditResponseChoices extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "finish_reason" }) - finishReason?: CreateEditResponseChoicesFinishReason; + finishReason: CreateEditResponseChoicesFinishReason; @SpeakeasyMetadata() @Expose({ name: "index" }) - index?: number; - - @SpeakeasyMetadata() - @Expose({ name: "logprobs" }) - @Type(() => CreateEditResponseChoicesLogprobs) - logprobs?: CreateEditResponseChoicesLogprobs; + index: number; @SpeakeasyMetadata() @Expose({ name: "text" }) - text?: string; + text: string; } export class CreateEditResponseUsage extends SpeakeasyBase { diff --git a/src/sdk/models/shared/createtranscriptionrequest.ts 
b/src/sdk/models/shared/createtranscriptionrequest1.ts similarity index 85% rename from src/sdk/models/shared/createtranscriptionrequest.ts rename to src/sdk/models/shared/createtranscriptionrequest1.ts index a5f7c1f..bc90b8f 100755 --- a/src/sdk/models/shared/createtranscriptionrequest.ts +++ b/src/sdk/models/shared/createtranscriptionrequest1.ts @@ -22,7 +22,21 @@ export enum CreateTranscriptionRequestModel2 { Whisper1 = "whisper-1", } -export class CreateTranscriptionRequest extends SpeakeasyBase { +/** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + * + * @remarks + * + */ +export enum CreateTranscriptionRequestResponseFormat { + Json = "json", + Text = "text", + Srt = "srt", + VerboseJson = "verbose_json", + Vtt = "vtt", +} + +export class CreateTranscriptionRequest1 extends SpeakeasyBase { /** * The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. * @@ -66,7 +80,7 @@ export class CreateTranscriptionRequest extends SpeakeasyBase { * */ @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) - responseFormat?: string; + responseFormat?: CreateTranscriptionRequestResponseFormat; /** * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. 
diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index 35712b6..5d49308 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -20,7 +20,7 @@ export * from "./createimagerequest"; export * from "./createimagevariationrequest"; export * from "./createmoderationrequest"; export * from "./createmoderationresponse"; -export * from "./createtranscriptionrequest"; +export * from "./createtranscriptionrequest1"; export * from "./createtranscriptionresponse"; export * from "./createtranslationrequest"; export * from "./createtranslationresponse"; diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index adcbfed..9061210 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -814,11 +814,11 @@ export class OpenAI { * Transcribes audio into the input language. */ async createTranscription( - req: shared.CreateTranscriptionRequest, + req: shared.CreateTranscriptionRequest1, config?: AxiosRequestConfig ): Promise { if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateTranscriptionRequest(req); + req = new shared.CreateTranscriptionRequest1(req); } const baseURL: string = utils.templateUrl( diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index e6ffb9a..03546ba 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.8.0"; - genVersion = "2.62.1"; + sdkVersion = "2.9.0"; + genVersion = "2.65.0"; public constructor(init?: Partial) { Object.assign(this, init); From 8b9cf6141331cc1cf662e3c60758dc0c973c4d26 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Mon, 17 Jul 2023 01:22:59 +0000 Subject: [PATCH 24/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.60.0 --- RELEASES.md | 10 +- files.gen | 1 + gen.yaml | 6 +- package-lock.json | 4 +- package.json | 2 +- src/sdk/models/errors/sdkerror.ts | 31 ++++++ src/sdk/openai.ts | 161 ++++++++++++++++++++++++++++++ 
src/sdk/sdk.ts | 4 +- 8 files changed, 210 insertions(+), 9 deletions(-) create mode 100755 src/sdk/models/errors/sdkerror.ts diff --git a/RELEASES.md b/RELEASES.md index e58d0f6..12c1403 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -332,4 +332,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.59.0 (2.65.0) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.9.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.9.0 - . \ No newline at end of file +- [NPM v2.9.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.9.0 - . + +## 2023-07-17 01:22:38 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.60.0 (2.66.0) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.10.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.10.0 - . \ No newline at end of file diff --git a/files.gen b/files.gen index 68d7ee8..55f737e 100755 --- a/files.gen +++ b/files.gen @@ -15,6 +15,7 @@ src/internal/utils/retries.ts src/internal/utils/security.ts src/internal/utils/utils.ts src/sdk/index.ts +src/sdk/models/errors/sdkerror.ts src/sdk/types/index.ts src/sdk/types/rfcdate.ts tsconfig.json diff --git a/gen.yaml b/gen.yaml index fb1f1ae..0cb94ee 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.59.0 - generationVersion: 2.65.0 + speakeasyVersion: 1.60.0 + generationVersion: 2.66.0 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.9.0 + version: 2.10.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 0f4d33b..61e65da 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": 
"@speakeasy-api/openai", - "version": "2.9.0", + "version": "2.10.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.9.0", + "version": "2.10.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 5a29890..d0b39c2 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.9.0", + "version": "2.10.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/errors/sdkerror.ts b/src/sdk/models/errors/sdkerror.ts new file mode 100755 index 0000000..0d69872 --- /dev/null +++ b/src/sdk/models/errors/sdkerror.ts @@ -0,0 +1,31 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { AxiosResponse } from "axios"; + +export class SDKError extends Error { + statusCode: number; + body: string; + rawResponse: AxiosResponse; + + constructor( + message: string, + statusCode: number, + body: string, + rawResponse: AxiosResponse + ) { + let bodyString = ""; + if (body?.length > 0) { + bodyString = `\n${body}`; + } + + super(`${message}: Status ${statusCode}${bodyString}`); + this.statusCode = statusCode; + this.body = body; + this.rawResponse = rawResponse; + + this.name = "SDKError"; + Object.setPrototypeOf(this, SDKError.prototype); + } +} diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index 9061210..099bfc1 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -69,6 +69,13 @@ export class OpenAI { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -143,6 +150,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.CreateChatCompletionResponse ); + } 
else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -216,6 +230,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.CreateCompletionResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -291,6 +312,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.CreateEditResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -364,6 +392,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.CreateEmbeddingResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -435,6 +470,13 @@ export class OpenAI { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.openAIFile = utils.objectToClass(JSON.parse(decodedRes), shared.OpenAIFile); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -510,6 +552,13 @@ export class OpenAI { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -583,6 +632,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.ImagesResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -656,6 +712,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.ImagesResponse ); + } else { + throw new errors.SDKError( + 
"unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -730,6 +793,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.ImagesResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -803,6 +873,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.CreateModerationResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -877,6 +954,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.CreateTranscriptionResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -950,6 +1034,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.CreateTranslationResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -1010,6 +1101,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.DeleteFileResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -1070,6 +1168,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.DeleteModelResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -1127,6 +1232,13 @@ export class OpenAI { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.downloadFile200ApplicationJSONString = decodedRes; + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -1180,6 +1292,13 @@ export class OpenAI { 
JSON.parse(decodedRes), shared.ListFilesResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -1243,6 +1362,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.ListFineTuneEventsResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -1297,6 +1423,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.ListFineTunesResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -1350,6 +1483,13 @@ export class OpenAI { JSON.parse(decodedRes), shared.ListModelsResponse ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -1407,6 +1547,13 @@ export class OpenAI { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.openAIFile = utils.objectToClass(JSON.parse(decodedRes), shared.OpenAIFile); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -1467,6 +1614,13 @@ export class OpenAI { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); } break; } @@ -1524,6 +1678,13 @@ export class OpenAI { case httpRes?.status == 200: if (utils.matchContentType(contentType, `application/json`)) { res.model = utils.objectToClass(JSON.parse(decodedRes), shared.Model); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, 
+ decodedRes, + httpRes + ); } break; } diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 03546ba..a7845e6 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.9.0"; - genVersion = "2.65.0"; + sdkVersion = "2.10.0"; + genVersion = "2.66.0"; public constructor(init?: Partial) { Object.assign(this, init); From d99a81d35147ee7b84ff505f318dab45a247908b Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 18 Jul 2023 01:42:19 +0000 Subject: [PATCH 25/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.61.0 --- RELEASES.md | 10 +- .../shared/createchatcompletionrequest.md | 10 +- docs/models/shared/createeditrequest.md | 16 +- docs/models/shared/createembeddingrequest.md | 10 +- .../shared/createembeddingrequestmodel2.md | 3 +- ...trequest.md => createimageeditrequest2.md} | 10 +- .../createimageeditrequestresponseformat.md | 11 ++ .../shared/createimageeditrequestsize.md | 12 ++ docs/models/shared/createimagerequest.md | 14 +- .../shared/createimagevariationrequest.md | 12 -- .../shared/createimagevariationrequest2.md | 12 ++ ...eateimagevariationrequestresponseformat.md | 11 ++ .../shared/createimagevariationrequestsize.md | 12 ++ docs/sdks/openai/README.md | 150 ++++++++++-------- files.gen | 13 +- gen.yaml | 6 +- package-lock.json | 4 +- package.json | 2 +- src/sdk/models/errors/index.ts | 5 + .../shared/createchatcompletionrequest.ts | 36 ++++- src/sdk/models/shared/createeditrequest.ts | 14 +- .../models/shared/createembeddingrequest.ts | 18 ++- ...trequest.ts => createimageeditrequest2.ts} | 42 ++++- src/sdk/models/shared/createimagerequest.ts | 8 +- .../shared/createimagevariationrequest.ts | 33 ---- .../shared/createimagevariationrequest2.ts | 65 ++++++++ src/sdk/models/shared/index.ts | 4 +- src/sdk/openai.ts | 9 +- src/sdk/sdk.ts | 4 +- 29 files changed, 375 insertions(+), 181 deletions(-) rename 
docs/models/shared/{createimageeditrequest.md => createimageeditrequest2.md} (86%) create mode 100755 docs/models/shared/createimageeditrequestresponseformat.md create mode 100755 docs/models/shared/createimageeditrequestsize.md delete mode 100755 docs/models/shared/createimagevariationrequest.md create mode 100755 docs/models/shared/createimagevariationrequest2.md create mode 100755 docs/models/shared/createimagevariationrequestresponseformat.md create mode 100755 docs/models/shared/createimagevariationrequestsize.md create mode 100755 src/sdk/models/errors/index.ts rename src/sdk/models/shared/{createimageeditrequest.ts => createimageeditrequest2.ts} (58%) delete mode 100755 src/sdk/models/shared/createimagevariationrequest.ts create mode 100755 src/sdk/models/shared/createimagevariationrequest2.ts diff --git a/RELEASES.md b/RELEASES.md index 12c1403..1753416 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -340,4 +340,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.60.0 (2.66.0) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.10.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.10.0 - . \ No newline at end of file +- [NPM v2.10.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.10.0 - . + +## 2023-07-18 01:41:59 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.61.0 (2.70.0) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.11.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.11.0 - . 
\ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequest.md b/docs/models/shared/createchatcompletionrequest.md index 4e14305..12e12c1 100755 --- a/docs/models/shared/createchatcompletionrequest.md +++ b/docs/models/shared/createchatcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `frequencyPenalty` | *number* | :heavy_minus_sign: | completions_frequency_penalty_description | | +| `frequencyPenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | | `functionCall` | *any* | :heavy_minus_sign: | Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. | | | `functions` | [ChatCompletionFunctions](../../models/shared/chatcompletionfunctions.md)[] | :heavy_minus_sign: | A list of functions the model may generate JSON inputs for. | | | `logitBias` | Record | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
| | @@ -13,9 +13,9 @@ | `messages` | [ChatCompletionRequestMessage](../../models/shared/chatcompletionrequestmessage.md)[] | :heavy_check_mark: | A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | | | `model` | *any* | :heavy_check_mark: | ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. | | | `n` | *number* | :heavy_minus_sign: | How many chat completion choices to generate for each input message. | 1 | -| `presencePenalty` | *number* | :heavy_minus_sign: | completions_presence_penalty_description | | +| `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | | `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens.
| | | `stream` | *boolean* | :heavy_minus_sign: | If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
| | -| `temperature` | *number* | :heavy_minus_sign: | completions_temperature_description | 1 | -| `topP` | *number* | :heavy_minus_sign: | completions_top_p_description | 1 | -| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

We generally recommend altering this or `top_p` but not both.
| 1 | +| `topP` | *number* | :heavy_minus_sign: | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.

We generally recommend altering this or `temperature` but not both.
| 1 | +| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createeditrequest.md b/docs/models/shared/createeditrequest.md index a6a736d..21bc370 100755 --- a/docs/models/shared/createeditrequest.md +++ b/docs/models/shared/createeditrequest.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | Example | -| -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -| `input` | *string* | :heavy_minus_sign: | The input text to use as a starting point for the edit. | What day of the wek is it? | -| `instruction` | *string* | :heavy_check_mark: | The instruction that tells the model how to edit the prompt. | Fix the spelling mistakes. | -| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. | | -| `n` | *number* | :heavy_minus_sign: | How many edits to generate for the input and instruction. 
| 1 | -| `temperature` | *number* | :heavy_minus_sign: | completions_temperature_description | 1 | -| `topP` | *number* | :heavy_minus_sign: | completions_top_p_description | 1 | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input` | *string* | :heavy_minus_sign: | The input text to use as a starting point for the edit. | What day of the wek is it? 
| +| `instruction` | *string* | :heavy_check_mark: | The instruction that tells the model how to edit the prompt. | Fix the spelling mistakes. | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. | | +| `n` | *number* | :heavy_minus_sign: | How many edits to generate for the input and instruction. | 1 | +| `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

We generally recommend altering this or `top_p` but not both.
| 1 | +| `topP` | *number* | :heavy_minus_sign: | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.

We generally recommend altering this or `temperature` but not both.
| 1 | \ No newline at end of file diff --git a/docs/models/shared/createembeddingrequest.md b/docs/models/shared/createembeddingrequest.md index aacad57..9de9fd3 100755 --- a/docs/models/shared/createembeddingrequest.md +++ b/docs/models/shared/createembeddingrequest.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 
`input` | *any* | :heavy_check_mark: | Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| -| `model` | *any* | :heavy_check_mark: | model_description | -| `user` | *any* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input` | *any* | :heavy_check_mark: | Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| | +| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createembeddingrequestmodel2.md b/docs/models/shared/createembeddingrequestmodel2.md index 22ba5fa..2fa004a 100755 --- a/docs/models/shared/createembeddingrequestmodel2.md +++ b/docs/models/shared/createembeddingrequestmodel2.md @@ -1,6 +1,7 @@ # CreateEmbeddingRequestModel2 -model_description +ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + ## Values diff --git a/docs/models/shared/createimageeditrequest.md b/docs/models/shared/createimageeditrequest2.md similarity index 86% rename from docs/models/shared/createimageeditrequest.md rename to docs/models/shared/createimageeditrequest2.md index e280422..6c22a54 100755 --- a/docs/models/shared/createimageeditrequest.md +++ b/docs/models/shared/createimageeditrequest2.md @@ -1,4 +1,4 @@ -# CreateImageEditRequest +# CreateImageEditRequest2 ## Fields @@ -7,8 +7,8 @@ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `image` | [CreateImageEditRequestImage](../../models/shared/createimageeditrequestimage.md) | :heavy_check_mark: | The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. | | | `mask` | [CreateImageEditRequestMask](../../models/shared/createimageeditrequestmask.md) | :heavy_minus_sign: | An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. | | -| `n` | *any* | :heavy_minus_sign: | N/A | | +| `n` | *number* | :heavy_minus_sign: | The number of images to generate. Must be between 1 and 10. | 1 | | `prompt` | *string* | :heavy_check_mark: | A text description of the desired image(s). The maximum length is 1000 characters. | A cute baby sea otter wearing a beret | -| `responseFormat` | *any* | :heavy_minus_sign: | N/A | | -| `size` | *any* | :heavy_minus_sign: | N/A | | -| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `responseFormat` | [CreateImageEditRequestResponseFormat](../../models/shared/createimageeditrequestresponseformat.md) | :heavy_minus_sign: | The format in which the generated images are returned. Must be one of `url` or `b64_json`. | url | +| `size` | [CreateImageEditRequestSize](../../models/shared/createimageeditrequestsize.md) | :heavy_minus_sign: | The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. | 1024x1024 | +| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createimageeditrequestresponseformat.md b/docs/models/shared/createimageeditrequestresponseformat.md new file mode 100755 index 0000000..65ebdad --- /dev/null +++ b/docs/models/shared/createimageeditrequestresponseformat.md @@ -0,0 +1,11 @@ +# CreateImageEditRequestResponseFormat + +The format in which the generated images are returned. Must be one of `url` or `b64_json`. + + +## Values + +| Name | Value | +| --------- | --------- | +| `Url` | url | +| `B64Json` | b64_json | \ No newline at end of file diff --git a/docs/models/shared/createimageeditrequestsize.md b/docs/models/shared/createimageeditrequestsize.md new file mode 100755 index 0000000..3f27723 --- /dev/null +++ b/docs/models/shared/createimageeditrequestsize.md @@ -0,0 +1,12 @@ +# CreateImageEditRequestSize + +The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + + +## Values + +| Name | Value | +| ------------------------------- | ------------------------------- | +| `TwoHundredAndFiftySixx256` | 256x256 | +| `FiveHundredAndTwelvex512` | 512x512 | +| `OneThousandAndTwentyFourx1024` | 1024x1024 | \ No newline at end of file diff --git a/docs/models/shared/createimagerequest.md b/docs/models/shared/createimagerequest.md index 8885741..401fa7b 100755 --- a/docs/models/shared/createimagerequest.md +++ b/docs/models/shared/createimagerequest.md @@ -3,10 +3,10 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | -| `n` | 
*number* | :heavy_minus_sign: | The number of images to generate. Must be between 1 and 10. | 1 | -| `prompt` | *string* | :heavy_check_mark: | A text description of the desired image(s). The maximum length is 1000 characters. | A cute baby sea otter | -| `responseFormat` | [CreateImageRequestResponseFormat](../../models/shared/createimagerequestresponseformat.md) | :heavy_minus_sign: | The format in which the generated images are returned. Must be one of `url` or `b64_json`. | url | -| `size` | [CreateImageRequestSize](../../models/shared/createimagerequestsize.md) | :heavy_minus_sign: | The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. | 1024x1024 | -| `user` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `n` | *number* | :heavy_minus_sign: | The number of images to generate. Must be between 1 and 10. | 1 | +| `prompt` | *string* | :heavy_check_mark: | A text description of the desired image(s). The maximum length is 1000 characters. 
| A cute baby sea otter | +| `responseFormat` | [CreateImageRequestResponseFormat](../../models/shared/createimagerequestresponseformat.md) | :heavy_minus_sign: | The format in which the generated images are returned. Must be one of `url` or `b64_json`. | url | +| `size` | [CreateImageRequestSize](../../models/shared/createimagerequestsize.md) | :heavy_minus_sign: | The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. | 1024x1024 | +| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createimagevariationrequest.md b/docs/models/shared/createimagevariationrequest.md deleted file mode 100755 index 582a7ad..0000000 --- a/docs/models/shared/createimagevariationrequest.md +++ /dev/null @@ -1,12 +0,0 @@ -# CreateImageVariationRequest - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -| `image` | [CreateImageVariationRequestImage](../../models/shared/createimagevariationrequestimage.md) | :heavy_check_mark: | The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. 
| -| `n` | *any* | :heavy_minus_sign: | N/A | -| `responseFormat` | *any* | :heavy_minus_sign: | N/A | -| `size` | *any* | :heavy_minus_sign: | N/A | -| `user` | *any* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createimagevariationrequest2.md b/docs/models/shared/createimagevariationrequest2.md new file mode 100755 index 0000000..49c38ac --- /dev/null +++ b/docs/models/shared/createimagevariationrequest2.md @@ -0,0 +1,12 @@ +# CreateImageVariationRequest2 + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `image` | [CreateImageVariationRequestImage](../../models/shared/createimagevariationrequestimage.md) | :heavy_check_mark: | The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. | | +| `n` | *number* | :heavy_minus_sign: | The number of images to generate. Must be between 1 and 10. | 1 | +| `responseFormat` | [CreateImageVariationRequestResponseFormat](../../models/shared/createimagevariationrequestresponseformat.md) | :heavy_minus_sign: | The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
| url | +| `size` | [CreateImageVariationRequestSize](../../models/shared/createimagevariationrequestsize.md) | :heavy_minus_sign: | The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. | 1024x1024 | +| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createimagevariationrequestresponseformat.md b/docs/models/shared/createimagevariationrequestresponseformat.md new file mode 100755 index 0000000..650241b --- /dev/null +++ b/docs/models/shared/createimagevariationrequestresponseformat.md @@ -0,0 +1,11 @@ +# CreateImageVariationRequestResponseFormat + +The format in which the generated images are returned. Must be one of `url` or `b64_json`. + + +## Values + +| Name | Value | +| --------- | --------- | +| `Url` | url | +| `B64Json` | b64_json | \ No newline at end of file diff --git a/docs/models/shared/createimagevariationrequestsize.md b/docs/models/shared/createimagevariationrequestsize.md new file mode 100755 index 0000000..5dcf6d9 --- /dev/null +++ b/docs/models/shared/createimagevariationrequestsize.md @@ -0,0 +1,12 @@ +# CreateImageVariationRequestSize + +The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + + +## Values + +| Name | Value | +| ------------------------------- | ------------------------------- | +| `TwoHundredAndFiftySixx256` | 256x256 | +| `FiveHundredAndTwelvex512` | 512x512 | +| `OneThousandAndTwentyFourx1024` | 1024x1024 | \ No newline at end of file diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index 42294b5..fca8bcb 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -166,7 +166,7 @@ sdk.openAI.createChatCompletion({ stream: false, temperature: 1, topP: 1, - user: "dolorem", + user: "user-1234", }).then((res: CreateChatCompletionResponse) => { if (res.statusCode == 200) { // handle response @@ -201,24 +201,20 @@ import { CreateCompletionRequestModel2 } from "@speakeasy-api/openai/dist/sdk/mo const sdk = new Gpt(); sdk.openAI.createCompletion({ - bestOf: 358152, + bestOf: 210382, echo: false, - frequencyPenalty: 1289.26, + frequencyPenalty: 3581.52, logitBias: { - "enim": 607831, - "nemo": 325047, - "excepturi": 38425, - "iure": 
634274, + "nobis": 315428, }, - logprobs: 988374, + logprobs: 607831, maxTokens: 16, - model: CreateCompletionRequestModel2.TextDavinci003, + model: "minima", n: 1, - presencePenalty: 6527.9, + presencePenalty: 5701.97, prompt: "This is a test.", - stop: [ - "["\n"]", - ], + stop: " +", stream: false, suffix: "test.", temperature: 1, @@ -301,12 +297,23 @@ const sdk = new Gpt(); sdk.openAI.createEmbedding({ input: [ - 253291, - 414369, - 466311, + [ + 652790, + ], + [ + 635059, + ], + [ + 995300, + ], + [ + 581850, + 253291, + 414369, + ], ], model: "text-embedding-ada-002", - user: "velit", + user: "user-1234", }).then((res: CreateEmbeddingResponse) => { if (res.statusCode == 200) { // handle response @@ -342,10 +349,10 @@ const sdk = new Gpt(); sdk.openAI.createFile({ file: { - content: "error".encode(), - file: "quia", + content: "molestiae".encode(), + file: "velit", }, - purpose: "quis", + purpose: "error", }).then((res: CreateFileResponse) => { if (res.statusCode == 200) { // handle response @@ -385,20 +392,19 @@ import { CreateFineTuneRequestModel2 } from "@speakeasy-api/openai/dist/sdk/mode const sdk = new Gpt(); sdk.openAI.createFineTune({ - batchSize: 110375, + batchSize: 158969, classificationBetas: [ - 6563.3, - 3172.02, - 1381.83, + 1103.75, + 6747.52, ], - classificationNClasses: 778346, - classificationPositiveClass: "sequi", + classificationNClasses: 656330, + classificationPositiveClass: "enim", computeClassificationMetrics: false, - learningRateMultiplier: 9495.72, - model: "curie", - nEpochs: 662527, - promptLossWeight: 8209.94, - suffix: "aut", + learningRateMultiplier: 1381.83, + model: CreateFineTuneRequestModel2.Curie, + nEpochs: 196582, + promptLossWeight: 9495.72, + suffix: "ipsam", trainingFile: "file-ajSREls59WBbvgSzJSVWxMCB", validationFile: "file-XjSREls59WBbvgSzJSVWxMCa", }).then((res: CreateFineTuneResponse) => { @@ -439,7 +445,7 @@ sdk.openAI.createImage({ prompt: "A cute baby sea otter", responseFormat: 
CreateImageRequestResponseFormat.Url, size: CreateImageRequestSize.OneThousandAndTwentyFourx1024, - user: "quasi", + user: "user-1234", }).then((res: CreateImageResponse) => { if (res.statusCode == 200) { // handle response @@ -469,23 +475,24 @@ Creates an edited or extended image given an original image and a prompt. ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateImageEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { CreateImageEditRequestResponseFormat, CreateImageEditRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); sdk.openAI.createImageEdit({ image: { - content: "error".encode(), - image: "temporibus", + content: "id".encode(), + image: "possimus", }, mask: { - content: "laborum".encode(), + content: "aut".encode(), mask: "quasi", }, - n: "reiciendis", + n: 1, prompt: "A cute baby sea otter wearing a beret", - responseFormat: "voluptatibus", - size: "vero", - user: "nihil", + responseFormat: CreateImageEditRequestResponseFormat.Url, + size: CreateImageEditRequestSize.OneThousandAndTwentyFourx1024, + user: "user-1234", }).then((res: CreateImageEditResponse) => { if (res.statusCode == 200) { // handle response @@ -495,10 +502,10 @@ sdk.openAI.createImageEdit({ ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `request` | [shared.CreateImageEditRequest](../../models/shared/createimageeditrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `request` | [shared.CreateImageEditRequest2](../../models/shared/createimageeditrequest2.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | ### Response @@ -515,18 +522,19 @@ Creates a variation of a given image. ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CreateImageVariationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { CreateImageVariationRequestResponseFormat, CreateImageVariationRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; const sdk = new Gpt(); sdk.openAI.createImageVariation({ image: { - content: "praesentium".encode(), - image: "voluptatibus", + content: "error".encode(), + image: "temporibus", }, - n: "ipsa", - responseFormat: "omnis", - size: "voluptate", - user: "cum", + n: 1, + responseFormat: CreateImageVariationRequestResponseFormat.Url, + size: CreateImageVariationRequestSize.OneThousandAndTwentyFourx1024, + user: "user-1234", }).then((res: CreateImageVariationResponse) => { if (res.statusCode == 200) { // handle response @@ -536,10 +544,10 @@ sdk.openAI.createImageVariation({ ### Parameters -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------- | -| `request` | [shared.CreateImageVariationRequest](../../models/shared/createimagevariationrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `request` | [shared.CreateImageVariationRequest2](../../models/shared/createimagevariationrequest2.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| ### Response @@ -561,8 +569,10 @@ import { CreateModerationRequestModel2 } from "@speakeasy-api/openai/dist/sdk/mo const sdk = new Gpt(); sdk.openAI.createModeration({ - input: "I want to kill them.", - model: "text-moderation-stable", + input: [ + "I want to kill them.", + ], + model: CreateModerationRequestModel2.TextModerationStable, }).then((res: CreateModerationResponse) => { if (res.statusCode == 200) { // handle response @@ -598,14 +608,14 @@ const sdk = new Gpt(); sdk.openAI.createTranscription({ file: { - content: "reprehenderit".encode(), - file: "ut", + content: "voluptatibus".encode(), + file: "vero", }, - language: "maiores", - model: "whisper-1", - prompt: "corporis", - responseFormat: CreateTranscriptionRequestResponseFormat.Text, - temperature: 4808.94, + language: "nihil", + model: CreateTranscriptionRequestModel2.Whisper1, + prompt: "voluptatibus", + responseFormat: CreateTranscriptionRequestResponseFormat.Json, + temperature: 6048.46, }).then((res: CreateTranscriptionResponse) => { if (res.statusCode == 200) { // handle response @@ -641,13 +651,13 @@ const sdk = new Gpt(); sdk.openAI.createTranslation({ file: { - content: "dicta".encode(), - file: "harum", + content: "voluptate".encode(), + file: "cum", }, model: "whisper-1", - prompt: "accusamus", - responseFormat: "commodi", - temperature: 9182.36, + prompt: "doloremque", + responseFormat: "reprehenderit", + temperature: 2828.07, }).then((res: CreateTranslationResponse) => { if (res.statusCode == 200) { // handle response @@ -681,7 +691,7 @@ import { DeleteFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operat const sdk = new Gpt(); sdk.openAI.deleteFile({ - fileId: "quae", + fileId: "maiores", }).then((res: DeleteFileResponse) => { if (res.statusCode == 200) { // handle response @@ -749,7 +759,7 @@ import { DownloadFileResponse } from "@speakeasy-api/openai/dist/sdk/models/oper const sdk = new Gpt(); sdk.openAI.downloadFile({ - fileId: "ipsum", + fileId: "dicta", }).then((res: 
DownloadFileResponse) => { if (res.statusCode == 200) { // handle response @@ -913,7 +923,7 @@ import { RetrieveFileResponse } from "@speakeasy-api/openai/dist/sdk/models/oper const sdk = new Gpt(); sdk.openAI.retrieveFile({ - fileId: "quidem", + fileId: "corporis", }).then((res: RetrieveFileResponse) => { if (res.statusCode == 200) { // handle response diff --git a/files.gen b/files.gen index 55f737e..722e2bc 100755 --- a/files.gen +++ b/files.gen @@ -61,8 +61,8 @@ src/sdk/models/shared/createfilerequest.ts src/sdk/models/shared/createfinetunerequest.ts src/sdk/models/shared/imagesresponse.ts src/sdk/models/shared/createimagerequest.ts -src/sdk/models/shared/createimageeditrequest.ts -src/sdk/models/shared/createimagevariationrequest.ts +src/sdk/models/shared/createimageeditrequest2.ts +src/sdk/models/shared/createimagevariationrequest2.ts src/sdk/models/shared/createmoderationresponse.ts src/sdk/models/shared/createmoderationrequest.ts src/sdk/models/shared/createtranscriptionresponse.ts @@ -77,6 +77,7 @@ src/sdk/models/shared/listfinetunesresponse.ts src/sdk/models/shared/listmodelsresponse.ts src/sdk/models/shared/model.ts src/sdk/models/shared/index.ts +src/sdk/models/errors/index.ts docs/sdks/gpt/README.md docs/sdks/openai/README.md USAGE.md @@ -159,9 +160,13 @@ docs/models/shared/createimagerequestsize.md docs/models/shared/createimagerequest.md docs/models/shared/createimageeditrequestimage.md docs/models/shared/createimageeditrequestmask.md -docs/models/shared/createimageeditrequest.md +docs/models/shared/createimageeditrequestresponseformat.md +docs/models/shared/createimageeditrequestsize.md +docs/models/shared/createimageeditrequest2.md docs/models/shared/createimagevariationrequestimage.md -docs/models/shared/createimagevariationrequest.md +docs/models/shared/createimagevariationrequestresponseformat.md +docs/models/shared/createimagevariationrequestsize.md +docs/models/shared/createimagevariationrequest2.md 
docs/models/shared/createmoderationresponseresultscategories.md docs/models/shared/createmoderationresponseresultscategoryscores.md docs/models/shared/createmoderationresponseresults.md diff --git a/gen.yaml b/gen.yaml index 0cb94ee..6fb3251 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.60.0 - generationVersion: 2.66.0 + speakeasyVersion: 1.61.0 + generationVersion: 2.70.0 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.10.0 + version: 2.11.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 61e65da..76736ea 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.10.0", + "version": "2.11.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.10.0", + "version": "2.11.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index d0b39c2..a9ac932 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.10.0", + "version": "2.11.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/errors/index.ts b/src/sdk/models/errors/index.ts new file mode 100755 index 0000000..4779e6a --- /dev/null +++ b/src/sdk/models/errors/index.ts @@ -0,0 +1,5 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
+ */ + +export * from "./sdkerror"; diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts index 6ae2b5b..6d29ace 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -46,7 +46,12 @@ export enum CreateChatCompletionRequestModel2 { export class CreateChatCompletionRequest extends SpeakeasyBase { /** - * completions_frequency_penalty_description + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + * + * @remarks + * + * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + * */ @SpeakeasyMetadata() @Expose({ name: "frequency_penalty" }) @@ -114,7 +119,12 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { n?: number; /** - * completions_presence_penalty_description + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + * + * @remarks + * + * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + * */ @SpeakeasyMetadata() @Expose({ name: "presence_penalty" }) @@ -141,20 +151,36 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { stream?: boolean; /** - * completions_temperature_description + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + * + * @remarks + * + * We generally recommend altering this or `top_p` but not both. 
+ * */ @SpeakeasyMetadata() @Expose({ name: "temperature" }) temperature?: number; /** - * completions_top_p_description + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * + * @remarks + * + * We generally recommend altering this or `temperature` but not both. + * */ @SpeakeasyMetadata() @Expose({ name: "top_p" }) topP?: number; + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + * + * @remarks + * + */ @SpeakeasyMetadata() @Expose({ name: "user" }) - user?: any; + user?: string; } diff --git a/src/sdk/models/shared/createeditrequest.ts b/src/sdk/models/shared/createeditrequest.ts index 2a99b1a..a5933fb 100755 --- a/src/sdk/models/shared/createeditrequest.ts +++ b/src/sdk/models/shared/createeditrequest.ts @@ -43,14 +43,24 @@ export class CreateEditRequest extends SpeakeasyBase { n?: number; /** - * completions_temperature_description + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + * + * @remarks + * + * We generally recommend altering this or `top_p` but not both. + * */ @SpeakeasyMetadata() @Expose({ name: "temperature" }) temperature?: number; /** - * completions_top_p_description + * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + * + * @remarks + * + * We generally recommend altering this or `temperature` but not both. 
+ * */ @SpeakeasyMetadata() @Expose({ name: "top_p" }) diff --git a/src/sdk/models/shared/createembeddingrequest.ts b/src/sdk/models/shared/createembeddingrequest.ts index d3beb82..54c0069 100755 --- a/src/sdk/models/shared/createembeddingrequest.ts +++ b/src/sdk/models/shared/createembeddingrequest.ts @@ -6,7 +6,10 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; /** - * model_description + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + * + * @remarks + * */ export enum CreateEmbeddingRequestModel2 { TextEmbeddingAda002 = "text-embedding-ada-002", @@ -24,13 +27,22 @@ export class CreateEmbeddingRequest extends SpeakeasyBase { input: any; /** - * model_description + * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + * + * @remarks + * */ @SpeakeasyMetadata() @Expose({ name: "model" }) model: any; + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
+ * + * @remarks + * + */ @SpeakeasyMetadata() @Expose({ name: "user" }) - user?: any; + user?: string; } diff --git a/src/sdk/models/shared/createimageeditrequest.ts b/src/sdk/models/shared/createimageeditrequest2.ts similarity index 58% rename from src/sdk/models/shared/createimageeditrequest.ts rename to src/sdk/models/shared/createimageeditrequest2.ts index d193c90..ce77bbf 100755 --- a/src/sdk/models/shared/createimageeditrequest.ts +++ b/src/sdk/models/shared/createimageeditrequest2.ts @@ -20,7 +20,24 @@ export class CreateImageEditRequestMask extends SpeakeasyBase { mask: string; } -export class CreateImageEditRequest extends SpeakeasyBase { +/** + * The format in which the generated images are returned. Must be one of `url` or `b64_json`. + */ +export enum CreateImageEditRequestResponseFormat { + Url = "url", + B64Json = "b64_json", +} + +/** + * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + */ +export enum CreateImageEditRequestSize { + TwoHundredAndFiftySixx256 = "256x256", + FiveHundredAndTwelvex512 = "512x512", + OneThousandAndTwentyFourx1024 = "1024x1024", +} + +export class CreateImageEditRequest2 extends SpeakeasyBase { /** * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. */ @@ -33,8 +50,11 @@ export class CreateImageEditRequest extends SpeakeasyBase { @SpeakeasyMetadata({ data: "multipart_form, file=true" }) mask?: CreateImageEditRequestMask; + /** + * The number of images to generate. Must be between 1 and 10. + */ @SpeakeasyMetadata({ data: "multipart_form, name=n" }) - n?: any; + n?: number; /** * A text description of the desired image(s). The maximum length is 1000 characters. 
@@ -42,12 +62,24 @@ export class CreateImageEditRequest extends SpeakeasyBase { @SpeakeasyMetadata({ data: "multipart_form, name=prompt" }) prompt: string; + /** + * The format in which the generated images are returned. Must be one of `url` or `b64_json`. + */ @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) - responseFormat?: any; + responseFormat?: CreateImageEditRequestResponseFormat; + /** + * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + */ @SpeakeasyMetadata({ data: "multipart_form, name=size" }) - size?: any; + size?: CreateImageEditRequestSize; + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + * + * @remarks + * + */ @SpeakeasyMetadata({ data: "multipart_form, name=user" }) - user?: any; + user?: string; } diff --git a/src/sdk/models/shared/createimagerequest.ts b/src/sdk/models/shared/createimagerequest.ts index e494a1a..6093e10 100755 --- a/src/sdk/models/shared/createimagerequest.ts +++ b/src/sdk/models/shared/createimagerequest.ts @@ -51,7 +51,13 @@ export class CreateImageRequest extends SpeakeasyBase { @Expose({ name: "size" }) size?: CreateImageRequestSize; + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + * + * @remarks + * + */ @SpeakeasyMetadata() @Expose({ name: "user" }) - user?: any; + user?: string; } diff --git a/src/sdk/models/shared/createimagevariationrequest.ts b/src/sdk/models/shared/createimagevariationrequest.ts deleted file mode 100755 index f27c789..0000000 --- a/src/sdk/models/shared/createimagevariationrequest.ts +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
- */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; - -export class CreateImageVariationRequestImage extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "multipart_form, content=true" }) - content: Uint8Array; - - @SpeakeasyMetadata({ data: "multipart_form, name=image" }) - image: string; -} - -export class CreateImageVariationRequest extends SpeakeasyBase { - /** - * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. - */ - @SpeakeasyMetadata({ data: "multipart_form, file=true" }) - image: CreateImageVariationRequestImage; - - @SpeakeasyMetadata({ data: "multipart_form, name=n" }) - n?: any; - - @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) - responseFormat?: any; - - @SpeakeasyMetadata({ data: "multipart_form, name=size" }) - size?: any; - - @SpeakeasyMetadata({ data: "multipart_form, name=user" }) - user?: any; -} diff --git a/src/sdk/models/shared/createimagevariationrequest2.ts b/src/sdk/models/shared/createimagevariationrequest2.ts new file mode 100755 index 0000000..a9267a3 --- /dev/null +++ b/src/sdk/models/shared/createimagevariationrequest2.ts @@ -0,0 +1,65 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; + +export class CreateImageVariationRequestImage extends SpeakeasyBase { + @SpeakeasyMetadata({ data: "multipart_form, content=true" }) + content: Uint8Array; + + @SpeakeasyMetadata({ data: "multipart_form, name=image" }) + image: string; +} + +/** + * The format in which the generated images are returned. Must be one of `url` or `b64_json`. + */ +export enum CreateImageVariationRequestResponseFormat { + Url = "url", + B64Json = "b64_json", +} + +/** + * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. 
+ */ +export enum CreateImageVariationRequestSize { + TwoHundredAndFiftySixx256 = "256x256", + FiveHundredAndTwelvex512 = "512x512", + OneThousandAndTwentyFourx1024 = "1024x1024", +} + +export class CreateImageVariationRequest2 extends SpeakeasyBase { + /** + * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + */ + @SpeakeasyMetadata({ data: "multipart_form, file=true" }) + image: CreateImageVariationRequestImage; + + /** + * The number of images to generate. Must be between 1 and 10. + */ + @SpeakeasyMetadata({ data: "multipart_form, name=n" }) + n?: number; + + /** + * The format in which the generated images are returned. Must be one of `url` or `b64_json`. + */ + @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) + responseFormat?: CreateImageVariationRequestResponseFormat; + + /** + * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + */ + @SpeakeasyMetadata({ data: "multipart_form, name=size" }) + size?: CreateImageVariationRequestSize; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). 
+ * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=user" }) + user?: string; +} diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index 5d49308..b6be31b 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -15,9 +15,9 @@ export * from "./createembeddingrequest"; export * from "./createembeddingresponse"; export * from "./createfilerequest"; export * from "./createfinetunerequest"; -export * from "./createimageeditrequest"; +export * from "./createimageeditrequest2"; export * from "./createimagerequest"; -export * from "./createimagevariationrequest"; +export * from "./createimagevariationrequest2"; export * from "./createmoderationrequest"; export * from "./createmoderationresponse"; export * from "./createtranscriptionrequest1"; diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index 099bfc1..cd8d3f3 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -3,6 +3,7 @@ */ import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; import * as operations from "./models/operations"; import * as shared from "./models/shared"; import { SDKConfiguration } from "./sdk"; @@ -650,11 +651,11 @@ export class OpenAI { * Creates an edited or extended image given an original image and a prompt. */ async createImageEdit( - req: shared.CreateImageEditRequest, + req: shared.CreateImageEditRequest2, config?: AxiosRequestConfig ): Promise { if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateImageEditRequest(req); + req = new shared.CreateImageEditRequest2(req); } const baseURL: string = utils.templateUrl( @@ -730,11 +731,11 @@ export class OpenAI { * Creates a variation of a given image. 
*/ async createImageVariation( - req: shared.CreateImageVariationRequest, + req: shared.CreateImageVariationRequest2, config?: AxiosRequestConfig ): Promise { if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateImageVariationRequest(req); + req = new shared.CreateImageVariationRequest2(req); } const baseURL: string = utils.templateUrl( diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index a7845e6..89f6d7e 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.10.0"; - genVersion = "2.66.0"; + sdkVersion = "2.11.0"; + genVersion = "2.70.0"; public constructor(init?: Partial) { Object.assign(this, init); From c90fd072f6bf4fd7f27375672a71c7384a26130b Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 19 Jul 2023 02:30:38 +0000 Subject: [PATCH 26/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.62.1 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 1753416..3c5dfb1 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -348,4 +348,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.61.0 (2.70.0) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.11.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.11.0 - . \ No newline at end of file +- [NPM v2.11.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.11.0 - . + +## 2023-07-19 02:30:18 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.62.1 (2.70.2) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.11.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.11.1 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 6fb3251..4792623 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.61.0 - generationVersion: 2.70.0 + speakeasyVersion: 1.62.1 + generationVersion: 2.70.2 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.11.0 + version: 2.11.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 76736ea..07ebff3 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.11.0", + "version": "2.11.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.11.0", + "version": "2.11.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index a9ac932..9fe2821 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.11.0", + "version": "2.11.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 89f6d7e..34e121b 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.11.0"; - genVersion = "2.70.0"; + sdkVersion = "2.11.1"; + genVersion = "2.70.2"; public constructor(init?: Partial) { Object.assign(this, init); From c9fcabf4766c52e16a50cb134bbeb700cbcd769b Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 22 Jul 2023 01:09:19 +0000 Subject: [PATCH 27/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.64.0 --- .eslintrc.yml | 0 RELEASES.md | 10 +++++++++- USAGE.md | 0 
docs/models/operations/cancelfinetunerequest.md | 0 docs/models/operations/cancelfinetuneresponse.md | 0 docs/models/operations/createchatcompletionresponse.md | 0 docs/models/operations/createcompletionresponse.md | 0 docs/models/operations/createeditresponse.md | 0 docs/models/operations/createembeddingresponse.md | 0 docs/models/operations/createfileresponse.md | 0 docs/models/operations/createfinetuneresponse.md | 0 docs/models/operations/createimageeditresponse.md | 0 docs/models/operations/createimageresponse.md | 0 docs/models/operations/createimagevariationresponse.md | 0 docs/models/operations/createmoderationresponse.md | 0 docs/models/operations/createtranscriptionresponse.md | 0 docs/models/operations/createtranslationresponse.md | 0 docs/models/operations/deletefilerequest.md | 0 docs/models/operations/deletefileresponse.md | 0 docs/models/operations/deletemodelrequest.md | 0 docs/models/operations/deletemodelresponse.md | 0 docs/models/operations/downloadfilerequest.md | 0 docs/models/operations/downloadfileresponse.md | 0 docs/models/operations/listfilesresponse.md | 0 docs/models/operations/listfinetuneeventsrequest.md | 0 docs/models/operations/listfinetuneeventsresponse.md | 0 docs/models/operations/listfinetunesresponse.md | 0 docs/models/operations/listmodelsresponse.md | 0 docs/models/operations/retrievefilerequest.md | 0 docs/models/operations/retrievefileresponse.md | 0 docs/models/operations/retrievefinetunerequest.md | 0 docs/models/operations/retrievefinetuneresponse.md | 0 docs/models/operations/retrievemodelrequest.md | 0 docs/models/operations/retrievemodelresponse.md | 0 docs/models/shared/chatcompletionfunctions.md | 0 docs/models/shared/chatcompletionrequestmessage.md | 0 .../shared/chatcompletionrequestmessagefunctioncall.md | 0 docs/models/shared/chatcompletionrequestmessagerole.md | 0 docs/models/shared/chatcompletionresponsemessage.md | 0 .../chatcompletionresponsemessagefunctioncall.md | 0 
.../models/shared/chatcompletionresponsemessagerole.md | 0 docs/models/shared/createchatcompletionrequest.md | 0 .../shared/createchatcompletionrequestfunctioncall1.md | 0 .../shared/createchatcompletionrequestfunctioncall2.md | 0 .../models/shared/createchatcompletionrequestmodel2.md | 0 docs/models/shared/createchatcompletionresponse.md | 0 .../shared/createchatcompletionresponsechoices.md | 0 .../createchatcompletionresponsechoicesfinishreason.md | 0 .../models/shared/createchatcompletionresponseusage.md | 0 docs/models/shared/createcompletionrequest.md | 0 docs/models/shared/createcompletionrequestmodel2.md | 0 docs/models/shared/createcompletionresponse.md | 0 docs/models/shared/createcompletionresponsechoices.md | 0 .../createcompletionresponsechoicesfinishreason.md | 0 .../shared/createcompletionresponsechoiceslogprobs.md | 0 docs/models/shared/createcompletionresponseusage.md | 0 docs/models/shared/createeditrequest.md | 0 docs/models/shared/createeditrequestmodel2.md | 0 docs/models/shared/createeditresponse.md | 0 docs/models/shared/createeditresponsechoices.md | 0 .../shared/createeditresponsechoicesfinishreason.md | 0 docs/models/shared/createeditresponseusage.md | 0 docs/models/shared/createembeddingrequest.md | 0 docs/models/shared/createembeddingrequestmodel2.md | 0 docs/models/shared/createembeddingresponse.md | 0 docs/models/shared/createembeddingresponsedata.md | 0 docs/models/shared/createembeddingresponseusage.md | 0 docs/models/shared/createfilerequest.md | 0 docs/models/shared/createfilerequestfile.md | 0 docs/models/shared/createfinetunerequest.md | 0 docs/models/shared/createfinetunerequestmodel2.md | 0 docs/models/shared/createimageeditrequest2.md | 0 docs/models/shared/createimageeditrequestimage.md | 0 docs/models/shared/createimageeditrequestmask.md | 0 .../shared/createimageeditrequestresponseformat.md | 0 docs/models/shared/createimageeditrequestsize.md | 0 docs/models/shared/createimagerequest.md | 0 
docs/models/shared/createimagerequestresponseformat.md | 0 docs/models/shared/createimagerequestsize.md | 0 docs/models/shared/createimagevariationrequest2.md | 0 docs/models/shared/createimagevariationrequestimage.md | 0 .../createimagevariationrequestresponseformat.md | 0 docs/models/shared/createimagevariationrequestsize.md | 0 docs/models/shared/createmoderationrequest.md | 0 docs/models/shared/createmoderationrequestmodel2.md | 0 docs/models/shared/createmoderationresponse.md | 0 docs/models/shared/createmoderationresponseresults.md | 0 .../createmoderationresponseresultscategories.md | 0 .../createmoderationresponseresultscategoryscores.md | 0 docs/models/shared/createtranscriptionrequest1.md | 0 docs/models/shared/createtranscriptionrequestfile.md | 0 docs/models/shared/createtranscriptionrequestmodel2.md | 0 .../shared/createtranscriptionrequestresponseformat.md | 0 docs/models/shared/createtranscriptionresponse.md | 0 docs/models/shared/createtranslationrequest.md | 0 docs/models/shared/createtranslationrequestfile.md | 0 docs/models/shared/createtranslationrequestmodel2.md | 0 docs/models/shared/createtranslationresponse.md | 0 docs/models/shared/deletefileresponse.md | 0 docs/models/shared/deletemodelresponse.md | 0 docs/models/shared/finetune.md | 0 docs/models/shared/finetuneevent.md | 0 docs/models/shared/finetunehyperparams.md | 0 docs/models/shared/imagesresponse.md | 0 docs/models/shared/imagesresponsedata.md | 0 docs/models/shared/listfilesresponse.md | 0 docs/models/shared/listfinetuneeventsresponse.md | 0 docs/models/shared/listfinetunesresponse.md | 0 docs/models/shared/listmodelsresponse.md | 0 docs/models/shared/model.md | 0 docs/models/shared/openaifile.md | 0 docs/sdks/gpt/README.md | 0 docs/sdks/openai/README.md | 0 gen.yaml | 6 +++--- jest.config.js | 0 package-lock.json | 4 ++-- package.json | 2 +- src/index.ts | 0 src/internal/utils/contenttype.ts | 0 src/internal/utils/headers.ts | 0 src/internal/utils/index.ts | 0 
src/internal/utils/pathparams.ts | 0 src/internal/utils/queryparams.ts | 0 src/internal/utils/requestbody.ts | 0 src/internal/utils/retries.ts | 0 src/internal/utils/security.ts | 0 src/internal/utils/utils.ts | 0 src/sdk/index.ts | 0 src/sdk/models/errors/index.ts | 0 src/sdk/models/errors/sdkerror.ts | 0 src/sdk/models/operations/cancelfinetune.ts | 0 src/sdk/models/operations/createchatcompletion.ts | 0 src/sdk/models/operations/createcompletion.ts | 0 src/sdk/models/operations/createedit.ts | 0 src/sdk/models/operations/createembedding.ts | 0 src/sdk/models/operations/createfile.ts | 0 src/sdk/models/operations/createfinetune.ts | 0 src/sdk/models/operations/createimage.ts | 0 src/sdk/models/operations/createimageedit.ts | 0 src/sdk/models/operations/createimagevariation.ts | 0 src/sdk/models/operations/createmoderation.ts | 0 src/sdk/models/operations/createtranscription.ts | 0 src/sdk/models/operations/createtranslation.ts | 0 src/sdk/models/operations/deletefile.ts | 0 src/sdk/models/operations/deletemodel.ts | 0 src/sdk/models/operations/downloadfile.ts | 0 src/sdk/models/operations/index.ts | 0 src/sdk/models/operations/listfiles.ts | 0 src/sdk/models/operations/listfinetuneevents.ts | 0 src/sdk/models/operations/listfinetunes.ts | 0 src/sdk/models/operations/listmodels.ts | 0 src/sdk/models/operations/retrievefile.ts | 0 src/sdk/models/operations/retrievefinetune.ts | 0 src/sdk/models/operations/retrievemodel.ts | 0 src/sdk/models/shared/chatcompletionfunctions.ts | 0 src/sdk/models/shared/chatcompletionrequestmessage.ts | 0 src/sdk/models/shared/chatcompletionresponsemessage.ts | 0 src/sdk/models/shared/createchatcompletionrequest.ts | 0 src/sdk/models/shared/createchatcompletionresponse.ts | 0 src/sdk/models/shared/createcompletionrequest.ts | 0 src/sdk/models/shared/createcompletionresponse.ts | 0 src/sdk/models/shared/createeditrequest.ts | 0 src/sdk/models/shared/createeditresponse.ts | 0 src/sdk/models/shared/createembeddingrequest.ts | 0 
src/sdk/models/shared/createembeddingresponse.ts | 0 src/sdk/models/shared/createfilerequest.ts | 0 src/sdk/models/shared/createfinetunerequest.ts | 0 src/sdk/models/shared/createimageeditrequest2.ts | 0 src/sdk/models/shared/createimagerequest.ts | 0 src/sdk/models/shared/createimagevariationrequest2.ts | 0 src/sdk/models/shared/createmoderationrequest.ts | 0 src/sdk/models/shared/createmoderationresponse.ts | 0 src/sdk/models/shared/createtranscriptionrequest1.ts | 0 src/sdk/models/shared/createtranscriptionresponse.ts | 0 src/sdk/models/shared/createtranslationrequest.ts | 0 src/sdk/models/shared/createtranslationresponse.ts | 0 src/sdk/models/shared/deletefileresponse.ts | 0 src/sdk/models/shared/deletemodelresponse.ts | 0 src/sdk/models/shared/finetune.ts | 0 src/sdk/models/shared/finetuneevent.ts | 0 src/sdk/models/shared/imagesresponse.ts | 0 src/sdk/models/shared/index.ts | 0 src/sdk/models/shared/listfilesresponse.ts | 0 src/sdk/models/shared/listfinetuneeventsresponse.ts | 0 src/sdk/models/shared/listfinetunesresponse.ts | 0 src/sdk/models/shared/listmodelsresponse.ts | 0 src/sdk/models/shared/model.ts | 0 src/sdk/models/shared/openaifile.ts | 0 src/sdk/openai.ts | 0 src/sdk/sdk.ts | 4 ++-- src/sdk/types/index.ts | 0 src/sdk/types/rfcdate.ts | 0 tsconfig.json | 0 193 files changed, 17 insertions(+), 9 deletions(-) mode change 100755 => 100644 .eslintrc.yml mode change 100755 => 100644 USAGE.md mode change 100755 => 100644 docs/models/operations/cancelfinetunerequest.md mode change 100755 => 100644 docs/models/operations/cancelfinetuneresponse.md mode change 100755 => 100644 docs/models/operations/createchatcompletionresponse.md mode change 100755 => 100644 docs/models/operations/createcompletionresponse.md mode change 100755 => 100644 docs/models/operations/createeditresponse.md mode change 100755 => 100644 docs/models/operations/createembeddingresponse.md mode change 100755 => 100644 docs/models/operations/createfileresponse.md mode change 100755 => 
100644 docs/models/operations/createfinetuneresponse.md mode change 100755 => 100644 docs/models/operations/createimageeditresponse.md mode change 100755 => 100644 docs/models/operations/createimageresponse.md mode change 100755 => 100644 docs/models/operations/createimagevariationresponse.md mode change 100755 => 100644 docs/models/operations/createmoderationresponse.md mode change 100755 => 100644 docs/models/operations/createtranscriptionresponse.md mode change 100755 => 100644 docs/models/operations/createtranslationresponse.md mode change 100755 => 100644 docs/models/operations/deletefilerequest.md mode change 100755 => 100644 docs/models/operations/deletefileresponse.md mode change 100755 => 100644 docs/models/operations/deletemodelrequest.md mode change 100755 => 100644 docs/models/operations/deletemodelresponse.md mode change 100755 => 100644 docs/models/operations/downloadfilerequest.md mode change 100755 => 100644 docs/models/operations/downloadfileresponse.md mode change 100755 => 100644 docs/models/operations/listfilesresponse.md mode change 100755 => 100644 docs/models/operations/listfinetuneeventsrequest.md mode change 100755 => 100644 docs/models/operations/listfinetuneeventsresponse.md mode change 100755 => 100644 docs/models/operations/listfinetunesresponse.md mode change 100755 => 100644 docs/models/operations/listmodelsresponse.md mode change 100755 => 100644 docs/models/operations/retrievefilerequest.md mode change 100755 => 100644 docs/models/operations/retrievefileresponse.md mode change 100755 => 100644 docs/models/operations/retrievefinetunerequest.md mode change 100755 => 100644 docs/models/operations/retrievefinetuneresponse.md mode change 100755 => 100644 docs/models/operations/retrievemodelrequest.md mode change 100755 => 100644 docs/models/operations/retrievemodelresponse.md mode change 100755 => 100644 docs/models/shared/chatcompletionfunctions.md mode change 100755 => 100644 docs/models/shared/chatcompletionrequestmessage.md mode 
change 100755 => 100644 docs/models/shared/chatcompletionrequestmessagefunctioncall.md mode change 100755 => 100644 docs/models/shared/chatcompletionrequestmessagerole.md mode change 100755 => 100644 docs/models/shared/chatcompletionresponsemessage.md mode change 100755 => 100644 docs/models/shared/chatcompletionresponsemessagefunctioncall.md mode change 100755 => 100644 docs/models/shared/chatcompletionresponsemessagerole.md mode change 100755 => 100644 docs/models/shared/createchatcompletionrequest.md mode change 100755 => 100644 docs/models/shared/createchatcompletionrequestfunctioncall1.md mode change 100755 => 100644 docs/models/shared/createchatcompletionrequestfunctioncall2.md mode change 100755 => 100644 docs/models/shared/createchatcompletionrequestmodel2.md mode change 100755 => 100644 docs/models/shared/createchatcompletionresponse.md mode change 100755 => 100644 docs/models/shared/createchatcompletionresponsechoices.md mode change 100755 => 100644 docs/models/shared/createchatcompletionresponsechoicesfinishreason.md mode change 100755 => 100644 docs/models/shared/createchatcompletionresponseusage.md mode change 100755 => 100644 docs/models/shared/createcompletionrequest.md mode change 100755 => 100644 docs/models/shared/createcompletionrequestmodel2.md mode change 100755 => 100644 docs/models/shared/createcompletionresponse.md mode change 100755 => 100644 docs/models/shared/createcompletionresponsechoices.md mode change 100755 => 100644 docs/models/shared/createcompletionresponsechoicesfinishreason.md mode change 100755 => 100644 docs/models/shared/createcompletionresponsechoiceslogprobs.md mode change 100755 => 100644 docs/models/shared/createcompletionresponseusage.md mode change 100755 => 100644 docs/models/shared/createeditrequest.md mode change 100755 => 100644 docs/models/shared/createeditrequestmodel2.md mode change 100755 => 100644 docs/models/shared/createeditresponse.md mode change 100755 => 100644 
docs/models/shared/createeditresponsechoices.md mode change 100755 => 100644 docs/models/shared/createeditresponsechoicesfinishreason.md mode change 100755 => 100644 docs/models/shared/createeditresponseusage.md mode change 100755 => 100644 docs/models/shared/createembeddingrequest.md mode change 100755 => 100644 docs/models/shared/createembeddingrequestmodel2.md mode change 100755 => 100644 docs/models/shared/createembeddingresponse.md mode change 100755 => 100644 docs/models/shared/createembeddingresponsedata.md mode change 100755 => 100644 docs/models/shared/createembeddingresponseusage.md mode change 100755 => 100644 docs/models/shared/createfilerequest.md mode change 100755 => 100644 docs/models/shared/createfilerequestfile.md mode change 100755 => 100644 docs/models/shared/createfinetunerequest.md mode change 100755 => 100644 docs/models/shared/createfinetunerequestmodel2.md mode change 100755 => 100644 docs/models/shared/createimageeditrequest2.md mode change 100755 => 100644 docs/models/shared/createimageeditrequestimage.md mode change 100755 => 100644 docs/models/shared/createimageeditrequestmask.md mode change 100755 => 100644 docs/models/shared/createimageeditrequestresponseformat.md mode change 100755 => 100644 docs/models/shared/createimageeditrequestsize.md mode change 100755 => 100644 docs/models/shared/createimagerequest.md mode change 100755 => 100644 docs/models/shared/createimagerequestresponseformat.md mode change 100755 => 100644 docs/models/shared/createimagerequestsize.md mode change 100755 => 100644 docs/models/shared/createimagevariationrequest2.md mode change 100755 => 100644 docs/models/shared/createimagevariationrequestimage.md mode change 100755 => 100644 docs/models/shared/createimagevariationrequestresponseformat.md mode change 100755 => 100644 docs/models/shared/createimagevariationrequestsize.md mode change 100755 => 100644 docs/models/shared/createmoderationrequest.md mode change 100755 => 100644 
docs/models/shared/createmoderationrequestmodel2.md mode change 100755 => 100644 docs/models/shared/createmoderationresponse.md mode change 100755 => 100644 docs/models/shared/createmoderationresponseresults.md mode change 100755 => 100644 docs/models/shared/createmoderationresponseresultscategories.md mode change 100755 => 100644 docs/models/shared/createmoderationresponseresultscategoryscores.md mode change 100755 => 100644 docs/models/shared/createtranscriptionrequest1.md mode change 100755 => 100644 docs/models/shared/createtranscriptionrequestfile.md mode change 100755 => 100644 docs/models/shared/createtranscriptionrequestmodel2.md mode change 100755 => 100644 docs/models/shared/createtranscriptionrequestresponseformat.md mode change 100755 => 100644 docs/models/shared/createtranscriptionresponse.md mode change 100755 => 100644 docs/models/shared/createtranslationrequest.md mode change 100755 => 100644 docs/models/shared/createtranslationrequestfile.md mode change 100755 => 100644 docs/models/shared/createtranslationrequestmodel2.md mode change 100755 => 100644 docs/models/shared/createtranslationresponse.md mode change 100755 => 100644 docs/models/shared/deletefileresponse.md mode change 100755 => 100644 docs/models/shared/deletemodelresponse.md mode change 100755 => 100644 docs/models/shared/finetune.md mode change 100755 => 100644 docs/models/shared/finetuneevent.md mode change 100755 => 100644 docs/models/shared/finetunehyperparams.md mode change 100755 => 100644 docs/models/shared/imagesresponse.md mode change 100755 => 100644 docs/models/shared/imagesresponsedata.md mode change 100755 => 100644 docs/models/shared/listfilesresponse.md mode change 100755 => 100644 docs/models/shared/listfinetuneeventsresponse.md mode change 100755 => 100644 docs/models/shared/listfinetunesresponse.md mode change 100755 => 100644 docs/models/shared/listmodelsresponse.md mode change 100755 => 100644 docs/models/shared/model.md mode change 100755 => 100644 
docs/models/shared/openaifile.md mode change 100755 => 100644 docs/sdks/gpt/README.md mode change 100755 => 100644 docs/sdks/openai/README.md mode change 100755 => 100644 jest.config.js mode change 100755 => 100644 package-lock.json mode change 100755 => 100644 package.json mode change 100755 => 100644 src/index.ts mode change 100755 => 100644 src/internal/utils/contenttype.ts mode change 100755 => 100644 src/internal/utils/headers.ts mode change 100755 => 100644 src/internal/utils/index.ts mode change 100755 => 100644 src/internal/utils/pathparams.ts mode change 100755 => 100644 src/internal/utils/queryparams.ts mode change 100755 => 100644 src/internal/utils/requestbody.ts mode change 100755 => 100644 src/internal/utils/retries.ts mode change 100755 => 100644 src/internal/utils/security.ts mode change 100755 => 100644 src/internal/utils/utils.ts mode change 100755 => 100644 src/sdk/index.ts mode change 100755 => 100644 src/sdk/models/errors/index.ts mode change 100755 => 100644 src/sdk/models/errors/sdkerror.ts mode change 100755 => 100644 src/sdk/models/operations/cancelfinetune.ts mode change 100755 => 100644 src/sdk/models/operations/createchatcompletion.ts mode change 100755 => 100644 src/sdk/models/operations/createcompletion.ts mode change 100755 => 100644 src/sdk/models/operations/createedit.ts mode change 100755 => 100644 src/sdk/models/operations/createembedding.ts mode change 100755 => 100644 src/sdk/models/operations/createfile.ts mode change 100755 => 100644 src/sdk/models/operations/createfinetune.ts mode change 100755 => 100644 src/sdk/models/operations/createimage.ts mode change 100755 => 100644 src/sdk/models/operations/createimageedit.ts mode change 100755 => 100644 src/sdk/models/operations/createimagevariation.ts mode change 100755 => 100644 src/sdk/models/operations/createmoderation.ts mode change 100755 => 100644 src/sdk/models/operations/createtranscription.ts mode change 100755 => 100644 src/sdk/models/operations/createtranslation.ts mode 
change 100755 => 100644 src/sdk/models/operations/deletefile.ts mode change 100755 => 100644 src/sdk/models/operations/deletemodel.ts mode change 100755 => 100644 src/sdk/models/operations/downloadfile.ts mode change 100755 => 100644 src/sdk/models/operations/index.ts mode change 100755 => 100644 src/sdk/models/operations/listfiles.ts mode change 100755 => 100644 src/sdk/models/operations/listfinetuneevents.ts mode change 100755 => 100644 src/sdk/models/operations/listfinetunes.ts mode change 100755 => 100644 src/sdk/models/operations/listmodels.ts mode change 100755 => 100644 src/sdk/models/operations/retrievefile.ts mode change 100755 => 100644 src/sdk/models/operations/retrievefinetune.ts mode change 100755 => 100644 src/sdk/models/operations/retrievemodel.ts mode change 100755 => 100644 src/sdk/models/shared/chatcompletionfunctions.ts mode change 100755 => 100644 src/sdk/models/shared/chatcompletionrequestmessage.ts mode change 100755 => 100644 src/sdk/models/shared/chatcompletionresponsemessage.ts mode change 100755 => 100644 src/sdk/models/shared/createchatcompletionrequest.ts mode change 100755 => 100644 src/sdk/models/shared/createchatcompletionresponse.ts mode change 100755 => 100644 src/sdk/models/shared/createcompletionrequest.ts mode change 100755 => 100644 src/sdk/models/shared/createcompletionresponse.ts mode change 100755 => 100644 src/sdk/models/shared/createeditrequest.ts mode change 100755 => 100644 src/sdk/models/shared/createeditresponse.ts mode change 100755 => 100644 src/sdk/models/shared/createembeddingrequest.ts mode change 100755 => 100644 src/sdk/models/shared/createembeddingresponse.ts mode change 100755 => 100644 src/sdk/models/shared/createfilerequest.ts mode change 100755 => 100644 src/sdk/models/shared/createfinetunerequest.ts mode change 100755 => 100644 src/sdk/models/shared/createimageeditrequest2.ts mode change 100755 => 100644 src/sdk/models/shared/createimagerequest.ts mode change 100755 => 100644 
src/sdk/models/shared/createimagevariationrequest2.ts mode change 100755 => 100644 src/sdk/models/shared/createmoderationrequest.ts mode change 100755 => 100644 src/sdk/models/shared/createmoderationresponse.ts mode change 100755 => 100644 src/sdk/models/shared/createtranscriptionrequest1.ts mode change 100755 => 100644 src/sdk/models/shared/createtranscriptionresponse.ts mode change 100755 => 100644 src/sdk/models/shared/createtranslationrequest.ts mode change 100755 => 100644 src/sdk/models/shared/createtranslationresponse.ts mode change 100755 => 100644 src/sdk/models/shared/deletefileresponse.ts mode change 100755 => 100644 src/sdk/models/shared/deletemodelresponse.ts mode change 100755 => 100644 src/sdk/models/shared/finetune.ts mode change 100755 => 100644 src/sdk/models/shared/finetuneevent.ts mode change 100755 => 100644 src/sdk/models/shared/imagesresponse.ts mode change 100755 => 100644 src/sdk/models/shared/index.ts mode change 100755 => 100644 src/sdk/models/shared/listfilesresponse.ts mode change 100755 => 100644 src/sdk/models/shared/listfinetuneeventsresponse.ts mode change 100755 => 100644 src/sdk/models/shared/listfinetunesresponse.ts mode change 100755 => 100644 src/sdk/models/shared/listmodelsresponse.ts mode change 100755 => 100644 src/sdk/models/shared/model.ts mode change 100755 => 100644 src/sdk/models/shared/openaifile.ts mode change 100755 => 100644 src/sdk/openai.ts mode change 100755 => 100644 src/sdk/sdk.ts mode change 100755 => 100644 src/sdk/types/index.ts mode change 100755 => 100644 src/sdk/types/rfcdate.ts mode change 100755 => 100644 tsconfig.json diff --git a/.eslintrc.yml b/.eslintrc.yml old mode 100755 new mode 100644 diff --git a/RELEASES.md b/RELEASES.md index 3c5dfb1..e5cc398 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -356,4 +356,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.62.1 (2.70.2) https://github.com/speakeasy-api/speakeasy ### 
Releases -- [NPM v2.11.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.11.1 - . \ No newline at end of file +- [NPM v2.11.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.11.1 - . + +## 2023-07-22 01:08:59 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.64.0 (2.71.0) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.12.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.12.0 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/cancelfinetunerequest.md b/docs/models/operations/cancelfinetunerequest.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/cancelfinetuneresponse.md b/docs/models/operations/cancelfinetuneresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createchatcompletionresponse.md b/docs/models/operations/createchatcompletionresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createcompletionresponse.md b/docs/models/operations/createcompletionresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createeditresponse.md b/docs/models/operations/createeditresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createembeddingresponse.md b/docs/models/operations/createembeddingresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createfileresponse.md b/docs/models/operations/createfileresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createfinetuneresponse.md b/docs/models/operations/createfinetuneresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createimageeditresponse.md b/docs/models/operations/createimageeditresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createimageresponse.md 
b/docs/models/operations/createimageresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createimagevariationresponse.md b/docs/models/operations/createimagevariationresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createmoderationresponse.md b/docs/models/operations/createmoderationresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createtranscriptionresponse.md b/docs/models/operations/createtranscriptionresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/createtranslationresponse.md b/docs/models/operations/createtranslationresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/deletefilerequest.md b/docs/models/operations/deletefilerequest.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/deletefileresponse.md b/docs/models/operations/deletefileresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/deletemodelrequest.md b/docs/models/operations/deletemodelrequest.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/deletemodelresponse.md b/docs/models/operations/deletemodelresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/downloadfilerequest.md b/docs/models/operations/downloadfilerequest.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/downloadfileresponse.md b/docs/models/operations/downloadfileresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/listfilesresponse.md b/docs/models/operations/listfilesresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/listfinetuneeventsrequest.md b/docs/models/operations/listfinetuneeventsrequest.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/listfinetuneeventsresponse.md b/docs/models/operations/listfinetuneeventsresponse.md old mode 100755 new mode 100644 diff --git 
a/docs/models/operations/listfinetunesresponse.md b/docs/models/operations/listfinetunesresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/listmodelsresponse.md b/docs/models/operations/listmodelsresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/retrievefilerequest.md b/docs/models/operations/retrievefilerequest.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/retrievefileresponse.md b/docs/models/operations/retrievefileresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/retrievefinetunerequest.md b/docs/models/operations/retrievefinetunerequest.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/retrievefinetuneresponse.md b/docs/models/operations/retrievefinetuneresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/retrievemodelrequest.md b/docs/models/operations/retrievemodelrequest.md old mode 100755 new mode 100644 diff --git a/docs/models/operations/retrievemodelresponse.md b/docs/models/operations/retrievemodelresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/chatcompletionfunctions.md b/docs/models/shared/chatcompletionfunctions.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/chatcompletionrequestmessage.md b/docs/models/shared/chatcompletionrequestmessage.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/chatcompletionrequestmessagefunctioncall.md b/docs/models/shared/chatcompletionrequestmessagefunctioncall.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/chatcompletionrequestmessagerole.md b/docs/models/shared/chatcompletionrequestmessagerole.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/chatcompletionresponsemessage.md b/docs/models/shared/chatcompletionresponsemessage.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/chatcompletionresponsemessagefunctioncall.md 
b/docs/models/shared/chatcompletionresponsemessagefunctioncall.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/chatcompletionresponsemessagerole.md b/docs/models/shared/chatcompletionresponsemessagerole.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createchatcompletionrequest.md b/docs/models/shared/createchatcompletionrequest.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createchatcompletionrequestfunctioncall1.md b/docs/models/shared/createchatcompletionrequestfunctioncall1.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createchatcompletionrequestfunctioncall2.md b/docs/models/shared/createchatcompletionrequestfunctioncall2.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createchatcompletionrequestmodel2.md b/docs/models/shared/createchatcompletionrequestmodel2.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createchatcompletionresponse.md b/docs/models/shared/createchatcompletionresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createchatcompletionresponsechoices.md b/docs/models/shared/createchatcompletionresponsechoices.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md b/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createchatcompletionresponseusage.md b/docs/models/shared/createchatcompletionresponseusage.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createcompletionrequest.md b/docs/models/shared/createcompletionrequest.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createcompletionrequestmodel2.md b/docs/models/shared/createcompletionrequestmodel2.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createcompletionresponse.md b/docs/models/shared/createcompletionresponse.md old mode 100755 new mode 100644 
diff --git a/docs/models/shared/createcompletionresponsechoices.md b/docs/models/shared/createcompletionresponsechoices.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createcompletionresponsechoicesfinishreason.md b/docs/models/shared/createcompletionresponsechoicesfinishreason.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createcompletionresponsechoiceslogprobs.md b/docs/models/shared/createcompletionresponsechoiceslogprobs.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createcompletionresponseusage.md b/docs/models/shared/createcompletionresponseusage.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createeditrequest.md b/docs/models/shared/createeditrequest.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createeditrequestmodel2.md b/docs/models/shared/createeditrequestmodel2.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createeditresponse.md b/docs/models/shared/createeditresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createeditresponsechoices.md b/docs/models/shared/createeditresponsechoices.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createeditresponsechoicesfinishreason.md b/docs/models/shared/createeditresponsechoicesfinishreason.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createeditresponseusage.md b/docs/models/shared/createeditresponseusage.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createembeddingrequest.md b/docs/models/shared/createembeddingrequest.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createembeddingrequestmodel2.md b/docs/models/shared/createembeddingrequestmodel2.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createembeddingresponse.md b/docs/models/shared/createembeddingresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createembeddingresponsedata.md 
b/docs/models/shared/createembeddingresponsedata.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createembeddingresponseusage.md b/docs/models/shared/createembeddingresponseusage.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createfilerequest.md b/docs/models/shared/createfilerequest.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createfilerequestfile.md b/docs/models/shared/createfilerequestfile.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createfinetunerequest.md b/docs/models/shared/createfinetunerequest.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createfinetunerequestmodel2.md b/docs/models/shared/createfinetunerequestmodel2.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createimageeditrequest2.md b/docs/models/shared/createimageeditrequest2.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createimageeditrequestimage.md b/docs/models/shared/createimageeditrequestimage.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createimageeditrequestmask.md b/docs/models/shared/createimageeditrequestmask.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createimageeditrequestresponseformat.md b/docs/models/shared/createimageeditrequestresponseformat.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createimageeditrequestsize.md b/docs/models/shared/createimageeditrequestsize.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createimagerequest.md b/docs/models/shared/createimagerequest.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createimagerequestresponseformat.md b/docs/models/shared/createimagerequestresponseformat.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createimagerequestsize.md b/docs/models/shared/createimagerequestsize.md old mode 100755 new mode 100644 diff --git 
a/docs/models/shared/createimagevariationrequest2.md b/docs/models/shared/createimagevariationrequest2.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createimagevariationrequestimage.md b/docs/models/shared/createimagevariationrequestimage.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createimagevariationrequestresponseformat.md b/docs/models/shared/createimagevariationrequestresponseformat.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createimagevariationrequestsize.md b/docs/models/shared/createimagevariationrequestsize.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createmoderationrequest.md b/docs/models/shared/createmoderationrequest.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createmoderationrequestmodel2.md b/docs/models/shared/createmoderationrequestmodel2.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createmoderationresponse.md b/docs/models/shared/createmoderationresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createmoderationresponseresults.md b/docs/models/shared/createmoderationresponseresults.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createmoderationresponseresultscategories.md b/docs/models/shared/createmoderationresponseresultscategories.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createmoderationresponseresultscategoryscores.md b/docs/models/shared/createmoderationresponseresultscategoryscores.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createtranscriptionrequest1.md b/docs/models/shared/createtranscriptionrequest1.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createtranscriptionrequestfile.md b/docs/models/shared/createtranscriptionrequestfile.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createtranscriptionrequestmodel2.md b/docs/models/shared/createtranscriptionrequestmodel2.md old 
mode 100755 new mode 100644 diff --git a/docs/models/shared/createtranscriptionrequestresponseformat.md b/docs/models/shared/createtranscriptionrequestresponseformat.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createtranscriptionresponse.md b/docs/models/shared/createtranscriptionresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createtranslationrequest.md b/docs/models/shared/createtranslationrequest.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createtranslationrequestfile.md b/docs/models/shared/createtranslationrequestfile.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createtranslationrequestmodel2.md b/docs/models/shared/createtranslationrequestmodel2.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/createtranslationresponse.md b/docs/models/shared/createtranslationresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/deletefileresponse.md b/docs/models/shared/deletefileresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/deletemodelresponse.md b/docs/models/shared/deletemodelresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/finetune.md b/docs/models/shared/finetune.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/finetuneevent.md b/docs/models/shared/finetuneevent.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/finetunehyperparams.md b/docs/models/shared/finetunehyperparams.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/imagesresponse.md b/docs/models/shared/imagesresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/imagesresponsedata.md b/docs/models/shared/imagesresponsedata.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/listfilesresponse.md b/docs/models/shared/listfilesresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/listfinetuneeventsresponse.md 
b/docs/models/shared/listfinetuneeventsresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/listfinetunesresponse.md b/docs/models/shared/listfinetunesresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/listmodelsresponse.md b/docs/models/shared/listmodelsresponse.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/model.md b/docs/models/shared/model.md old mode 100755 new mode 100644 diff --git a/docs/models/shared/openaifile.md b/docs/models/shared/openaifile.md old mode 100755 new mode 100644 diff --git a/docs/sdks/gpt/README.md b/docs/sdks/gpt/README.md old mode 100755 new mode 100644 diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md old mode 100755 new mode 100644 diff --git a/gen.yaml b/gen.yaml index 4792623..3d00423 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.62.1 - generationVersion: 2.70.2 + speakeasyVersion: 1.64.0 + generationVersion: 2.71.0 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.11.1 + version: 2.12.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/jest.config.js b/jest.config.js old mode 100755 new mode 100644 diff --git a/package-lock.json b/package-lock.json old mode 100755 new mode 100644 index 07ebff3..e788ed4 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.11.1", + "version": "2.12.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.11.1", + "version": "2.12.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json old mode 100755 new mode 100644 index 9fe2821..479f564 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": 
"@speakeasy-api/openai", - "version": "2.11.1", + "version": "2.12.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/index.ts b/src/index.ts old mode 100755 new mode 100644 diff --git a/src/internal/utils/contenttype.ts b/src/internal/utils/contenttype.ts old mode 100755 new mode 100644 diff --git a/src/internal/utils/headers.ts b/src/internal/utils/headers.ts old mode 100755 new mode 100644 diff --git a/src/internal/utils/index.ts b/src/internal/utils/index.ts old mode 100755 new mode 100644 diff --git a/src/internal/utils/pathparams.ts b/src/internal/utils/pathparams.ts old mode 100755 new mode 100644 diff --git a/src/internal/utils/queryparams.ts b/src/internal/utils/queryparams.ts old mode 100755 new mode 100644 diff --git a/src/internal/utils/requestbody.ts b/src/internal/utils/requestbody.ts old mode 100755 new mode 100644 diff --git a/src/internal/utils/retries.ts b/src/internal/utils/retries.ts old mode 100755 new mode 100644 diff --git a/src/internal/utils/security.ts b/src/internal/utils/security.ts old mode 100755 new mode 100644 diff --git a/src/internal/utils/utils.ts b/src/internal/utils/utils.ts old mode 100755 new mode 100644 diff --git a/src/sdk/index.ts b/src/sdk/index.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/errors/index.ts b/src/sdk/models/errors/index.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/errors/sdkerror.ts b/src/sdk/models/errors/sdkerror.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/cancelfinetune.ts b/src/sdk/models/operations/cancelfinetune.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createchatcompletion.ts b/src/sdk/models/operations/createchatcompletion.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createcompletion.ts b/src/sdk/models/operations/createcompletion.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createedit.ts 
b/src/sdk/models/operations/createedit.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createembedding.ts b/src/sdk/models/operations/createembedding.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createfile.ts b/src/sdk/models/operations/createfile.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createfinetune.ts b/src/sdk/models/operations/createfinetune.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createimage.ts b/src/sdk/models/operations/createimage.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createimageedit.ts b/src/sdk/models/operations/createimageedit.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createimagevariation.ts b/src/sdk/models/operations/createimagevariation.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createmoderation.ts b/src/sdk/models/operations/createmoderation.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createtranscription.ts b/src/sdk/models/operations/createtranscription.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/createtranslation.ts b/src/sdk/models/operations/createtranslation.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/deletefile.ts b/src/sdk/models/operations/deletefile.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/deletemodel.ts b/src/sdk/models/operations/deletemodel.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/downloadfile.ts b/src/sdk/models/operations/downloadfile.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/index.ts b/src/sdk/models/operations/index.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/listfiles.ts b/src/sdk/models/operations/listfiles.ts old mode 100755 new mode 100644 diff --git 
a/src/sdk/models/operations/listfinetuneevents.ts b/src/sdk/models/operations/listfinetuneevents.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/listfinetunes.ts b/src/sdk/models/operations/listfinetunes.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/listmodels.ts b/src/sdk/models/operations/listmodels.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/retrievefile.ts b/src/sdk/models/operations/retrievefile.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/retrievefinetune.ts b/src/sdk/models/operations/retrievefinetune.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/operations/retrievemodel.ts b/src/sdk/models/operations/retrievemodel.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/chatcompletionfunctions.ts b/src/sdk/models/shared/chatcompletionfunctions.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/chatcompletionrequestmessage.ts b/src/sdk/models/shared/chatcompletionrequestmessage.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/chatcompletionresponsemessage.ts b/src/sdk/models/shared/chatcompletionresponsemessage.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createchatcompletionresponse.ts b/src/sdk/models/shared/createchatcompletionresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createcompletionresponse.ts b/src/sdk/models/shared/createcompletionresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createeditrequest.ts b/src/sdk/models/shared/createeditrequest.ts old mode 100755 new mode 100644 diff --git 
a/src/sdk/models/shared/createeditresponse.ts b/src/sdk/models/shared/createeditresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createembeddingrequest.ts b/src/sdk/models/shared/createembeddingrequest.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createembeddingresponse.ts b/src/sdk/models/shared/createembeddingresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createfilerequest.ts b/src/sdk/models/shared/createfilerequest.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createfinetunerequest.ts b/src/sdk/models/shared/createfinetunerequest.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createimageeditrequest2.ts b/src/sdk/models/shared/createimageeditrequest2.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createimagerequest.ts b/src/sdk/models/shared/createimagerequest.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createimagevariationrequest2.ts b/src/sdk/models/shared/createimagevariationrequest2.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createmoderationrequest.ts b/src/sdk/models/shared/createmoderationrequest.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createmoderationresponse.ts b/src/sdk/models/shared/createmoderationresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createtranscriptionrequest1.ts b/src/sdk/models/shared/createtranscriptionrequest1.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createtranscriptionresponse.ts b/src/sdk/models/shared/createtranscriptionresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createtranslationrequest.ts b/src/sdk/models/shared/createtranslationrequest.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/createtranslationresponse.ts b/src/sdk/models/shared/createtranslationresponse.ts old mode 100755 
new mode 100644 diff --git a/src/sdk/models/shared/deletefileresponse.ts b/src/sdk/models/shared/deletefileresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/deletemodelresponse.ts b/src/sdk/models/shared/deletemodelresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/finetune.ts b/src/sdk/models/shared/finetune.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/finetuneevent.ts b/src/sdk/models/shared/finetuneevent.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/imagesresponse.ts b/src/sdk/models/shared/imagesresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/listfilesresponse.ts b/src/sdk/models/shared/listfilesresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/listfinetuneeventsresponse.ts b/src/sdk/models/shared/listfinetuneeventsresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/listfinetunesresponse.ts b/src/sdk/models/shared/listfinetunesresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/listmodelsresponse.ts b/src/sdk/models/shared/listmodelsresponse.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/model.ts b/src/sdk/models/shared/model.ts old mode 100755 new mode 100644 diff --git a/src/sdk/models/shared/openaifile.ts b/src/sdk/models/shared/openaifile.ts old mode 100755 new mode 100644 diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts old mode 100755 new mode 100644 diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts old mode 100755 new mode 100644 index 34e121b..cf6988f --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.11.1"; - genVersion = "2.70.2"; + sdkVersion = "2.12.0"; + genVersion = 
"2.71.0"; public constructor(init?: Partial) { Object.assign(this, init); diff --git a/src/sdk/types/index.ts b/src/sdk/types/index.ts old mode 100755 new mode 100644 diff --git a/src/sdk/types/rfcdate.ts b/src/sdk/types/rfcdate.ts old mode 100755 new mode 100644 diff --git a/tsconfig.json b/tsconfig.json old mode 100755 new mode 100644 From d372151dd1c96c69302649717eee398d350de431 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 26 Jul 2023 01:08:59 +0000 Subject: [PATCH 28/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.65.0 --- .eslintrc.yml | 0 RELEASES.md | 10 +++++++++- USAGE.md | 0 docs/models/operations/cancelfinetunerequest.md | 0 docs/models/operations/cancelfinetuneresponse.md | 0 docs/models/operations/createchatcompletionresponse.md | 0 docs/models/operations/createcompletionresponse.md | 0 docs/models/operations/createeditresponse.md | 0 docs/models/operations/createembeddingresponse.md | 0 docs/models/operations/createfileresponse.md | 0 docs/models/operations/createfinetuneresponse.md | 0 docs/models/operations/createimageeditresponse.md | 0 docs/models/operations/createimageresponse.md | 0 docs/models/operations/createimagevariationresponse.md | 0 docs/models/operations/createmoderationresponse.md | 0 docs/models/operations/createtranscriptionresponse.md | 0 docs/models/operations/createtranslationresponse.md | 0 docs/models/operations/deletefilerequest.md | 0 docs/models/operations/deletefileresponse.md | 0 docs/models/operations/deletemodelrequest.md | 0 docs/models/operations/deletemodelresponse.md | 0 docs/models/operations/downloadfilerequest.md | 0 docs/models/operations/downloadfileresponse.md | 0 docs/models/operations/listfilesresponse.md | 0 docs/models/operations/listfinetuneeventsrequest.md | 0 docs/models/operations/listfinetuneeventsresponse.md | 0 docs/models/operations/listfinetunesresponse.md | 0 docs/models/operations/listmodelsresponse.md | 0 docs/models/operations/retrievefilerequest.md | 0 
docs/models/operations/retrievefileresponse.md | 0 docs/models/operations/retrievefinetunerequest.md | 0 docs/models/operations/retrievefinetuneresponse.md | 0 docs/models/operations/retrievemodelrequest.md | 0 docs/models/operations/retrievemodelresponse.md | 0 docs/models/shared/chatcompletionfunctions.md | 0 docs/models/shared/chatcompletionrequestmessage.md | 0 .../shared/chatcompletionrequestmessagefunctioncall.md | 0 docs/models/shared/chatcompletionrequestmessagerole.md | 0 docs/models/shared/chatcompletionresponsemessage.md | 0 .../chatcompletionresponsemessagefunctioncall.md | 0 .../models/shared/chatcompletionresponsemessagerole.md | 0 docs/models/shared/createchatcompletionrequest.md | 0 .../shared/createchatcompletionrequestfunctioncall1.md | 0 .../shared/createchatcompletionrequestfunctioncall2.md | 0 .../models/shared/createchatcompletionrequestmodel2.md | 0 docs/models/shared/createchatcompletionresponse.md | 0 .../shared/createchatcompletionresponsechoices.md | 0 .../createchatcompletionresponsechoicesfinishreason.md | 0 .../models/shared/createchatcompletionresponseusage.md | 0 docs/models/shared/createcompletionrequest.md | 0 docs/models/shared/createcompletionrequestmodel2.md | 0 docs/models/shared/createcompletionresponse.md | 0 docs/models/shared/createcompletionresponsechoices.md | 0 .../createcompletionresponsechoicesfinishreason.md | 0 .../shared/createcompletionresponsechoiceslogprobs.md | 0 docs/models/shared/createcompletionresponseusage.md | 0 docs/models/shared/createeditrequest.md | 0 docs/models/shared/createeditrequestmodel2.md | 0 docs/models/shared/createeditresponse.md | 0 docs/models/shared/createeditresponsechoices.md | 0 .../shared/createeditresponsechoicesfinishreason.md | 0 docs/models/shared/createeditresponseusage.md | 0 docs/models/shared/createembeddingrequest.md | 0 docs/models/shared/createembeddingrequestmodel2.md | 0 docs/models/shared/createembeddingresponse.md | 0 docs/models/shared/createembeddingresponsedata.md | 
0 docs/models/shared/createembeddingresponseusage.md | 0 docs/models/shared/createfilerequest.md | 0 docs/models/shared/createfilerequestfile.md | 0 docs/models/shared/createfinetunerequest.md | 0 docs/models/shared/createfinetunerequestmodel2.md | 0 docs/models/shared/createimageeditrequest2.md | 0 docs/models/shared/createimageeditrequestimage.md | 0 docs/models/shared/createimageeditrequestmask.md | 0 .../shared/createimageeditrequestresponseformat.md | 0 docs/models/shared/createimageeditrequestsize.md | 0 docs/models/shared/createimagerequest.md | 0 docs/models/shared/createimagerequestresponseformat.md | 0 docs/models/shared/createimagerequestsize.md | 0 docs/models/shared/createimagevariationrequest2.md | 0 docs/models/shared/createimagevariationrequestimage.md | 0 .../createimagevariationrequestresponseformat.md | 0 docs/models/shared/createimagevariationrequestsize.md | 0 docs/models/shared/createmoderationrequest.md | 0 docs/models/shared/createmoderationrequestmodel2.md | 0 docs/models/shared/createmoderationresponse.md | 0 docs/models/shared/createmoderationresponseresults.md | 0 .../createmoderationresponseresultscategories.md | 0 .../createmoderationresponseresultscategoryscores.md | 0 docs/models/shared/createtranscriptionrequest1.md | 0 docs/models/shared/createtranscriptionrequestfile.md | 0 docs/models/shared/createtranscriptionrequestmodel2.md | 0 .../shared/createtranscriptionrequestresponseformat.md | 0 docs/models/shared/createtranscriptionresponse.md | 0 docs/models/shared/createtranslationrequest.md | 0 docs/models/shared/createtranslationrequestfile.md | 0 docs/models/shared/createtranslationrequestmodel2.md | 0 docs/models/shared/createtranslationresponse.md | 0 docs/models/shared/deletefileresponse.md | 0 docs/models/shared/deletemodelresponse.md | 0 docs/models/shared/finetune.md | 0 docs/models/shared/finetuneevent.md | 0 docs/models/shared/finetunehyperparams.md | 0 docs/models/shared/imagesresponse.md | 0 
docs/models/shared/imagesresponsedata.md | 0 docs/models/shared/listfilesresponse.md | 0 docs/models/shared/listfinetuneeventsresponse.md | 0 docs/models/shared/listfinetunesresponse.md | 0 docs/models/shared/listmodelsresponse.md | 0 docs/models/shared/model.md | 0 docs/models/shared/openaifile.md | 0 docs/sdks/gpt/README.md | 0 docs/sdks/openai/README.md | 0 gen.yaml | 6 +++--- jest.config.js | 0 package-lock.json | 4 ++-- package.json | 2 +- src/index.ts | 0 src/internal/utils/contenttype.ts | 0 src/internal/utils/headers.ts | 0 src/internal/utils/index.ts | 0 src/internal/utils/pathparams.ts | 0 src/internal/utils/queryparams.ts | 0 src/internal/utils/requestbody.ts | 0 src/internal/utils/retries.ts | 0 src/internal/utils/security.ts | 0 src/internal/utils/utils.ts | 0 src/sdk/index.ts | 0 src/sdk/models/errors/index.ts | 0 src/sdk/models/errors/sdkerror.ts | 0 src/sdk/models/operations/cancelfinetune.ts | 0 src/sdk/models/operations/createchatcompletion.ts | 0 src/sdk/models/operations/createcompletion.ts | 0 src/sdk/models/operations/createedit.ts | 0 src/sdk/models/operations/createembedding.ts | 0 src/sdk/models/operations/createfile.ts | 0 src/sdk/models/operations/createfinetune.ts | 0 src/sdk/models/operations/createimage.ts | 0 src/sdk/models/operations/createimageedit.ts | 0 src/sdk/models/operations/createimagevariation.ts | 0 src/sdk/models/operations/createmoderation.ts | 0 src/sdk/models/operations/createtranscription.ts | 0 src/sdk/models/operations/createtranslation.ts | 0 src/sdk/models/operations/deletefile.ts | 0 src/sdk/models/operations/deletemodel.ts | 0 src/sdk/models/operations/downloadfile.ts | 0 src/sdk/models/operations/index.ts | 0 src/sdk/models/operations/listfiles.ts | 0 src/sdk/models/operations/listfinetuneevents.ts | 0 src/sdk/models/operations/listfinetunes.ts | 0 src/sdk/models/operations/listmodels.ts | 0 src/sdk/models/operations/retrievefile.ts | 0 src/sdk/models/operations/retrievefinetune.ts | 0 
src/sdk/models/operations/retrievemodel.ts | 0 src/sdk/models/shared/chatcompletionfunctions.ts | 0 src/sdk/models/shared/chatcompletionrequestmessage.ts | 0 src/sdk/models/shared/chatcompletionresponsemessage.ts | 0 src/sdk/models/shared/createchatcompletionrequest.ts | 0 src/sdk/models/shared/createchatcompletionresponse.ts | 0 src/sdk/models/shared/createcompletionrequest.ts | 0 src/sdk/models/shared/createcompletionresponse.ts | 0 src/sdk/models/shared/createeditrequest.ts | 0 src/sdk/models/shared/createeditresponse.ts | 0 src/sdk/models/shared/createembeddingrequest.ts | 0 src/sdk/models/shared/createembeddingresponse.ts | 0 src/sdk/models/shared/createfilerequest.ts | 0 src/sdk/models/shared/createfinetunerequest.ts | 0 src/sdk/models/shared/createimageeditrequest2.ts | 0 src/sdk/models/shared/createimagerequest.ts | 0 src/sdk/models/shared/createimagevariationrequest2.ts | 0 src/sdk/models/shared/createmoderationrequest.ts | 0 src/sdk/models/shared/createmoderationresponse.ts | 0 src/sdk/models/shared/createtranscriptionrequest1.ts | 0 src/sdk/models/shared/createtranscriptionresponse.ts | 0 src/sdk/models/shared/createtranslationrequest.ts | 0 src/sdk/models/shared/createtranslationresponse.ts | 0 src/sdk/models/shared/deletefileresponse.ts | 0 src/sdk/models/shared/deletemodelresponse.ts | 0 src/sdk/models/shared/finetune.ts | 0 src/sdk/models/shared/finetuneevent.ts | 0 src/sdk/models/shared/imagesresponse.ts | 0 src/sdk/models/shared/index.ts | 0 src/sdk/models/shared/listfilesresponse.ts | 0 src/sdk/models/shared/listfinetuneeventsresponse.ts | 0 src/sdk/models/shared/listfinetunesresponse.ts | 0 src/sdk/models/shared/listmodelsresponse.ts | 0 src/sdk/models/shared/model.ts | 0 src/sdk/models/shared/openaifile.ts | 0 src/sdk/openai.ts | 0 src/sdk/sdk.ts | 4 ++-- src/sdk/types/index.ts | 0 src/sdk/types/rfcdate.ts | 0 tsconfig.json | 0 193 files changed, 17 insertions(+), 9 deletions(-) mode change 100644 => 100755 .eslintrc.yml mode change 100644 => 
100755 USAGE.md mode change 100644 => 100755 docs/models/operations/cancelfinetunerequest.md mode change 100644 => 100755 docs/models/operations/cancelfinetuneresponse.md mode change 100644 => 100755 docs/models/operations/createchatcompletionresponse.md mode change 100644 => 100755 docs/models/operations/createcompletionresponse.md mode change 100644 => 100755 docs/models/operations/createeditresponse.md mode change 100644 => 100755 docs/models/operations/createembeddingresponse.md mode change 100644 => 100755 docs/models/operations/createfileresponse.md mode change 100644 => 100755 docs/models/operations/createfinetuneresponse.md mode change 100644 => 100755 docs/models/operations/createimageeditresponse.md mode change 100644 => 100755 docs/models/operations/createimageresponse.md mode change 100644 => 100755 docs/models/operations/createimagevariationresponse.md mode change 100644 => 100755 docs/models/operations/createmoderationresponse.md mode change 100644 => 100755 docs/models/operations/createtranscriptionresponse.md mode change 100644 => 100755 docs/models/operations/createtranslationresponse.md mode change 100644 => 100755 docs/models/operations/deletefilerequest.md mode change 100644 => 100755 docs/models/operations/deletefileresponse.md mode change 100644 => 100755 docs/models/operations/deletemodelrequest.md mode change 100644 => 100755 docs/models/operations/deletemodelresponse.md mode change 100644 => 100755 docs/models/operations/downloadfilerequest.md mode change 100644 => 100755 docs/models/operations/downloadfileresponse.md mode change 100644 => 100755 docs/models/operations/listfilesresponse.md mode change 100644 => 100755 docs/models/operations/listfinetuneeventsrequest.md mode change 100644 => 100755 docs/models/operations/listfinetuneeventsresponse.md mode change 100644 => 100755 docs/models/operations/listfinetunesresponse.md mode change 100644 => 100755 docs/models/operations/listmodelsresponse.md mode change 100644 => 100755 
docs/models/operations/retrievefilerequest.md mode change 100644 => 100755 docs/models/operations/retrievefileresponse.md mode change 100644 => 100755 docs/models/operations/retrievefinetunerequest.md mode change 100644 => 100755 docs/models/operations/retrievefinetuneresponse.md mode change 100644 => 100755 docs/models/operations/retrievemodelrequest.md mode change 100644 => 100755 docs/models/operations/retrievemodelresponse.md mode change 100644 => 100755 docs/models/shared/chatcompletionfunctions.md mode change 100644 => 100755 docs/models/shared/chatcompletionrequestmessage.md mode change 100644 => 100755 docs/models/shared/chatcompletionrequestmessagefunctioncall.md mode change 100644 => 100755 docs/models/shared/chatcompletionrequestmessagerole.md mode change 100644 => 100755 docs/models/shared/chatcompletionresponsemessage.md mode change 100644 => 100755 docs/models/shared/chatcompletionresponsemessagefunctioncall.md mode change 100644 => 100755 docs/models/shared/chatcompletionresponsemessagerole.md mode change 100644 => 100755 docs/models/shared/createchatcompletionrequest.md mode change 100644 => 100755 docs/models/shared/createchatcompletionrequestfunctioncall1.md mode change 100644 => 100755 docs/models/shared/createchatcompletionrequestfunctioncall2.md mode change 100644 => 100755 docs/models/shared/createchatcompletionrequestmodel2.md mode change 100644 => 100755 docs/models/shared/createchatcompletionresponse.md mode change 100644 => 100755 docs/models/shared/createchatcompletionresponsechoices.md mode change 100644 => 100755 docs/models/shared/createchatcompletionresponsechoicesfinishreason.md mode change 100644 => 100755 docs/models/shared/createchatcompletionresponseusage.md mode change 100644 => 100755 docs/models/shared/createcompletionrequest.md mode change 100644 => 100755 docs/models/shared/createcompletionrequestmodel2.md mode change 100644 => 100755 docs/models/shared/createcompletionresponse.md mode change 100644 => 100755 
docs/models/shared/createcompletionresponsechoices.md mode change 100644 => 100755 docs/models/shared/createcompletionresponsechoicesfinishreason.md mode change 100644 => 100755 docs/models/shared/createcompletionresponsechoiceslogprobs.md mode change 100644 => 100755 docs/models/shared/createcompletionresponseusage.md mode change 100644 => 100755 docs/models/shared/createeditrequest.md mode change 100644 => 100755 docs/models/shared/createeditrequestmodel2.md mode change 100644 => 100755 docs/models/shared/createeditresponse.md mode change 100644 => 100755 docs/models/shared/createeditresponsechoices.md mode change 100644 => 100755 docs/models/shared/createeditresponsechoicesfinishreason.md mode change 100644 => 100755 docs/models/shared/createeditresponseusage.md mode change 100644 => 100755 docs/models/shared/createembeddingrequest.md mode change 100644 => 100755 docs/models/shared/createembeddingrequestmodel2.md mode change 100644 => 100755 docs/models/shared/createembeddingresponse.md mode change 100644 => 100755 docs/models/shared/createembeddingresponsedata.md mode change 100644 => 100755 docs/models/shared/createembeddingresponseusage.md mode change 100644 => 100755 docs/models/shared/createfilerequest.md mode change 100644 => 100755 docs/models/shared/createfilerequestfile.md mode change 100644 => 100755 docs/models/shared/createfinetunerequest.md mode change 100644 => 100755 docs/models/shared/createfinetunerequestmodel2.md mode change 100644 => 100755 docs/models/shared/createimageeditrequest2.md mode change 100644 => 100755 docs/models/shared/createimageeditrequestimage.md mode change 100644 => 100755 docs/models/shared/createimageeditrequestmask.md mode change 100644 => 100755 docs/models/shared/createimageeditrequestresponseformat.md mode change 100644 => 100755 docs/models/shared/createimageeditrequestsize.md mode change 100644 => 100755 docs/models/shared/createimagerequest.md mode change 100644 => 100755 
docs/models/shared/createimagerequestresponseformat.md mode change 100644 => 100755 docs/models/shared/createimagerequestsize.md mode change 100644 => 100755 docs/models/shared/createimagevariationrequest2.md mode change 100644 => 100755 docs/models/shared/createimagevariationrequestimage.md mode change 100644 => 100755 docs/models/shared/createimagevariationrequestresponseformat.md mode change 100644 => 100755 docs/models/shared/createimagevariationrequestsize.md mode change 100644 => 100755 docs/models/shared/createmoderationrequest.md mode change 100644 => 100755 docs/models/shared/createmoderationrequestmodel2.md mode change 100644 => 100755 docs/models/shared/createmoderationresponse.md mode change 100644 => 100755 docs/models/shared/createmoderationresponseresults.md mode change 100644 => 100755 docs/models/shared/createmoderationresponseresultscategories.md mode change 100644 => 100755 docs/models/shared/createmoderationresponseresultscategoryscores.md mode change 100644 => 100755 docs/models/shared/createtranscriptionrequest1.md mode change 100644 => 100755 docs/models/shared/createtranscriptionrequestfile.md mode change 100644 => 100755 docs/models/shared/createtranscriptionrequestmodel2.md mode change 100644 => 100755 docs/models/shared/createtranscriptionrequestresponseformat.md mode change 100644 => 100755 docs/models/shared/createtranscriptionresponse.md mode change 100644 => 100755 docs/models/shared/createtranslationrequest.md mode change 100644 => 100755 docs/models/shared/createtranslationrequestfile.md mode change 100644 => 100755 docs/models/shared/createtranslationrequestmodel2.md mode change 100644 => 100755 docs/models/shared/createtranslationresponse.md mode change 100644 => 100755 docs/models/shared/deletefileresponse.md mode change 100644 => 100755 docs/models/shared/deletemodelresponse.md mode change 100644 => 100755 docs/models/shared/finetune.md mode change 100644 => 100755 docs/models/shared/finetuneevent.md mode change 100644 => 100755 
docs/models/shared/finetunehyperparams.md mode change 100644 => 100755 docs/models/shared/imagesresponse.md mode change 100644 => 100755 docs/models/shared/imagesresponsedata.md mode change 100644 => 100755 docs/models/shared/listfilesresponse.md mode change 100644 => 100755 docs/models/shared/listfinetuneeventsresponse.md mode change 100644 => 100755 docs/models/shared/listfinetunesresponse.md mode change 100644 => 100755 docs/models/shared/listmodelsresponse.md mode change 100644 => 100755 docs/models/shared/model.md mode change 100644 => 100755 docs/models/shared/openaifile.md mode change 100644 => 100755 docs/sdks/gpt/README.md mode change 100644 => 100755 docs/sdks/openai/README.md mode change 100644 => 100755 jest.config.js mode change 100644 => 100755 package-lock.json mode change 100644 => 100755 package.json mode change 100644 => 100755 src/index.ts mode change 100644 => 100755 src/internal/utils/contenttype.ts mode change 100644 => 100755 src/internal/utils/headers.ts mode change 100644 => 100755 src/internal/utils/index.ts mode change 100644 => 100755 src/internal/utils/pathparams.ts mode change 100644 => 100755 src/internal/utils/queryparams.ts mode change 100644 => 100755 src/internal/utils/requestbody.ts mode change 100644 => 100755 src/internal/utils/retries.ts mode change 100644 => 100755 src/internal/utils/security.ts mode change 100644 => 100755 src/internal/utils/utils.ts mode change 100644 => 100755 src/sdk/index.ts mode change 100644 => 100755 src/sdk/models/errors/index.ts mode change 100644 => 100755 src/sdk/models/errors/sdkerror.ts mode change 100644 => 100755 src/sdk/models/operations/cancelfinetune.ts mode change 100644 => 100755 src/sdk/models/operations/createchatcompletion.ts mode change 100644 => 100755 src/sdk/models/operations/createcompletion.ts mode change 100644 => 100755 src/sdk/models/operations/createedit.ts mode change 100644 => 100755 src/sdk/models/operations/createembedding.ts mode change 100644 => 100755 
src/sdk/models/operations/createfile.ts mode change 100644 => 100755 src/sdk/models/operations/createfinetune.ts mode change 100644 => 100755 src/sdk/models/operations/createimage.ts mode change 100644 => 100755 src/sdk/models/operations/createimageedit.ts mode change 100644 => 100755 src/sdk/models/operations/createimagevariation.ts mode change 100644 => 100755 src/sdk/models/operations/createmoderation.ts mode change 100644 => 100755 src/sdk/models/operations/createtranscription.ts mode change 100644 => 100755 src/sdk/models/operations/createtranslation.ts mode change 100644 => 100755 src/sdk/models/operations/deletefile.ts mode change 100644 => 100755 src/sdk/models/operations/deletemodel.ts mode change 100644 => 100755 src/sdk/models/operations/downloadfile.ts mode change 100644 => 100755 src/sdk/models/operations/index.ts mode change 100644 => 100755 src/sdk/models/operations/listfiles.ts mode change 100644 => 100755 src/sdk/models/operations/listfinetuneevents.ts mode change 100644 => 100755 src/sdk/models/operations/listfinetunes.ts mode change 100644 => 100755 src/sdk/models/operations/listmodels.ts mode change 100644 => 100755 src/sdk/models/operations/retrievefile.ts mode change 100644 => 100755 src/sdk/models/operations/retrievefinetune.ts mode change 100644 => 100755 src/sdk/models/operations/retrievemodel.ts mode change 100644 => 100755 src/sdk/models/shared/chatcompletionfunctions.ts mode change 100644 => 100755 src/sdk/models/shared/chatcompletionrequestmessage.ts mode change 100644 => 100755 src/sdk/models/shared/chatcompletionresponsemessage.ts mode change 100644 => 100755 src/sdk/models/shared/createchatcompletionrequest.ts mode change 100644 => 100755 src/sdk/models/shared/createchatcompletionresponse.ts mode change 100644 => 100755 src/sdk/models/shared/createcompletionrequest.ts mode change 100644 => 100755 src/sdk/models/shared/createcompletionresponse.ts mode change 100644 => 100755 src/sdk/models/shared/createeditrequest.ts mode change 
100644 => 100755 src/sdk/models/shared/createeditresponse.ts mode change 100644 => 100755 src/sdk/models/shared/createembeddingrequest.ts mode change 100644 => 100755 src/sdk/models/shared/createembeddingresponse.ts mode change 100644 => 100755 src/sdk/models/shared/createfilerequest.ts mode change 100644 => 100755 src/sdk/models/shared/createfinetunerequest.ts mode change 100644 => 100755 src/sdk/models/shared/createimageeditrequest2.ts mode change 100644 => 100755 src/sdk/models/shared/createimagerequest.ts mode change 100644 => 100755 src/sdk/models/shared/createimagevariationrequest2.ts mode change 100644 => 100755 src/sdk/models/shared/createmoderationrequest.ts mode change 100644 => 100755 src/sdk/models/shared/createmoderationresponse.ts mode change 100644 => 100755 src/sdk/models/shared/createtranscriptionrequest1.ts mode change 100644 => 100755 src/sdk/models/shared/createtranscriptionresponse.ts mode change 100644 => 100755 src/sdk/models/shared/createtranslationrequest.ts mode change 100644 => 100755 src/sdk/models/shared/createtranslationresponse.ts mode change 100644 => 100755 src/sdk/models/shared/deletefileresponse.ts mode change 100644 => 100755 src/sdk/models/shared/deletemodelresponse.ts mode change 100644 => 100755 src/sdk/models/shared/finetune.ts mode change 100644 => 100755 src/sdk/models/shared/finetuneevent.ts mode change 100644 => 100755 src/sdk/models/shared/imagesresponse.ts mode change 100644 => 100755 src/sdk/models/shared/index.ts mode change 100644 => 100755 src/sdk/models/shared/listfilesresponse.ts mode change 100644 => 100755 src/sdk/models/shared/listfinetuneeventsresponse.ts mode change 100644 => 100755 src/sdk/models/shared/listfinetunesresponse.ts mode change 100644 => 100755 src/sdk/models/shared/listmodelsresponse.ts mode change 100644 => 100755 src/sdk/models/shared/model.ts mode change 100644 => 100755 src/sdk/models/shared/openaifile.ts mode change 100644 => 100755 src/sdk/openai.ts mode change 100644 => 100755 
src/sdk/sdk.ts mode change 100644 => 100755 src/sdk/types/index.ts mode change 100644 => 100755 src/sdk/types/rfcdate.ts mode change 100644 => 100755 tsconfig.json diff --git a/.eslintrc.yml b/.eslintrc.yml old mode 100644 new mode 100755 diff --git a/RELEASES.md b/RELEASES.md index e5cc398..1bbcfe4 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -364,4 +364,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.64.0 (2.71.0) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.12.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.12.0 - . \ No newline at end of file +- [NPM v2.12.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.12.0 - . + +## 2023-07-26 01:08:38 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.65.0 (2.73.0) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.13.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.13.0 - . 
\ No newline at end of file diff --git a/USAGE.md b/USAGE.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/cancelfinetunerequest.md b/docs/models/operations/cancelfinetunerequest.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/cancelfinetuneresponse.md b/docs/models/operations/cancelfinetuneresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/createchatcompletionresponse.md b/docs/models/operations/createchatcompletionresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/createcompletionresponse.md b/docs/models/operations/createcompletionresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/createeditresponse.md b/docs/models/operations/createeditresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/createembeddingresponse.md b/docs/models/operations/createembeddingresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/createfileresponse.md b/docs/models/operations/createfileresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/createfinetuneresponse.md b/docs/models/operations/createfinetuneresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/createimageeditresponse.md b/docs/models/operations/createimageeditresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/createimageresponse.md b/docs/models/operations/createimageresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/createimagevariationresponse.md b/docs/models/operations/createimagevariationresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/createmoderationresponse.md b/docs/models/operations/createmoderationresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/createtranscriptionresponse.md b/docs/models/operations/createtranscriptionresponse.md old mode 100644 new mode 100755 
diff --git a/docs/models/operations/createtranslationresponse.md b/docs/models/operations/createtranslationresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/deletefilerequest.md b/docs/models/operations/deletefilerequest.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/deletefileresponse.md b/docs/models/operations/deletefileresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/deletemodelrequest.md b/docs/models/operations/deletemodelrequest.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/deletemodelresponse.md b/docs/models/operations/deletemodelresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/downloadfilerequest.md b/docs/models/operations/downloadfilerequest.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/downloadfileresponse.md b/docs/models/operations/downloadfileresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/listfilesresponse.md b/docs/models/operations/listfilesresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/listfinetuneeventsrequest.md b/docs/models/operations/listfinetuneeventsrequest.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/listfinetuneeventsresponse.md b/docs/models/operations/listfinetuneeventsresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/listfinetunesresponse.md b/docs/models/operations/listfinetunesresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/listmodelsresponse.md b/docs/models/operations/listmodelsresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/retrievefilerequest.md b/docs/models/operations/retrievefilerequest.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/retrievefileresponse.md b/docs/models/operations/retrievefileresponse.md old mode 100644 new mode 100755 diff --git 
a/docs/models/operations/retrievefinetunerequest.md b/docs/models/operations/retrievefinetunerequest.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/retrievefinetuneresponse.md b/docs/models/operations/retrievefinetuneresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/retrievemodelrequest.md b/docs/models/operations/retrievemodelrequest.md old mode 100644 new mode 100755 diff --git a/docs/models/operations/retrievemodelresponse.md b/docs/models/operations/retrievemodelresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/chatcompletionfunctions.md b/docs/models/shared/chatcompletionfunctions.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/chatcompletionrequestmessage.md b/docs/models/shared/chatcompletionrequestmessage.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/chatcompletionrequestmessagefunctioncall.md b/docs/models/shared/chatcompletionrequestmessagefunctioncall.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/chatcompletionrequestmessagerole.md b/docs/models/shared/chatcompletionrequestmessagerole.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/chatcompletionresponsemessage.md b/docs/models/shared/chatcompletionresponsemessage.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/chatcompletionresponsemessagefunctioncall.md b/docs/models/shared/chatcompletionresponsemessagefunctioncall.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/chatcompletionresponsemessagerole.md b/docs/models/shared/chatcompletionresponsemessagerole.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createchatcompletionrequest.md b/docs/models/shared/createchatcompletionrequest.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createchatcompletionrequestfunctioncall1.md b/docs/models/shared/createchatcompletionrequestfunctioncall1.md old mode 100644 new mode 100755 diff 
--git a/docs/models/shared/createchatcompletionrequestfunctioncall2.md b/docs/models/shared/createchatcompletionrequestfunctioncall2.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createchatcompletionrequestmodel2.md b/docs/models/shared/createchatcompletionrequestmodel2.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createchatcompletionresponse.md b/docs/models/shared/createchatcompletionresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createchatcompletionresponsechoices.md b/docs/models/shared/createchatcompletionresponsechoices.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md b/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createchatcompletionresponseusage.md b/docs/models/shared/createchatcompletionresponseusage.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createcompletionrequest.md b/docs/models/shared/createcompletionrequest.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createcompletionrequestmodel2.md b/docs/models/shared/createcompletionrequestmodel2.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createcompletionresponse.md b/docs/models/shared/createcompletionresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createcompletionresponsechoices.md b/docs/models/shared/createcompletionresponsechoices.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createcompletionresponsechoicesfinishreason.md b/docs/models/shared/createcompletionresponsechoicesfinishreason.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createcompletionresponsechoiceslogprobs.md b/docs/models/shared/createcompletionresponsechoiceslogprobs.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createcompletionresponseusage.md 
b/docs/models/shared/createcompletionresponseusage.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createeditrequest.md b/docs/models/shared/createeditrequest.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createeditrequestmodel2.md b/docs/models/shared/createeditrequestmodel2.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createeditresponse.md b/docs/models/shared/createeditresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createeditresponsechoices.md b/docs/models/shared/createeditresponsechoices.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createeditresponsechoicesfinishreason.md b/docs/models/shared/createeditresponsechoicesfinishreason.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createeditresponseusage.md b/docs/models/shared/createeditresponseusage.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createembeddingrequest.md b/docs/models/shared/createembeddingrequest.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createembeddingrequestmodel2.md b/docs/models/shared/createembeddingrequestmodel2.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createembeddingresponse.md b/docs/models/shared/createembeddingresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createembeddingresponsedata.md b/docs/models/shared/createembeddingresponsedata.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createembeddingresponseusage.md b/docs/models/shared/createembeddingresponseusage.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createfilerequest.md b/docs/models/shared/createfilerequest.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createfilerequestfile.md b/docs/models/shared/createfilerequestfile.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createfinetunerequest.md 
b/docs/models/shared/createfinetunerequest.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createfinetunerequestmodel2.md b/docs/models/shared/createfinetunerequestmodel2.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimageeditrequest2.md b/docs/models/shared/createimageeditrequest2.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimageeditrequestimage.md b/docs/models/shared/createimageeditrequestimage.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimageeditrequestmask.md b/docs/models/shared/createimageeditrequestmask.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimageeditrequestresponseformat.md b/docs/models/shared/createimageeditrequestresponseformat.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimageeditrequestsize.md b/docs/models/shared/createimageeditrequestsize.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimagerequest.md b/docs/models/shared/createimagerequest.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimagerequestresponseformat.md b/docs/models/shared/createimagerequestresponseformat.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimagerequestsize.md b/docs/models/shared/createimagerequestsize.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimagevariationrequest2.md b/docs/models/shared/createimagevariationrequest2.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimagevariationrequestimage.md b/docs/models/shared/createimagevariationrequestimage.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimagevariationrequestresponseformat.md b/docs/models/shared/createimagevariationrequestresponseformat.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createimagevariationrequestsize.md b/docs/models/shared/createimagevariationrequestsize.md old 
mode 100644 new mode 100755 diff --git a/docs/models/shared/createmoderationrequest.md b/docs/models/shared/createmoderationrequest.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createmoderationrequestmodel2.md b/docs/models/shared/createmoderationrequestmodel2.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createmoderationresponse.md b/docs/models/shared/createmoderationresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createmoderationresponseresults.md b/docs/models/shared/createmoderationresponseresults.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createmoderationresponseresultscategories.md b/docs/models/shared/createmoderationresponseresultscategories.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createmoderationresponseresultscategoryscores.md b/docs/models/shared/createmoderationresponseresultscategoryscores.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createtranscriptionrequest1.md b/docs/models/shared/createtranscriptionrequest1.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createtranscriptionrequestfile.md b/docs/models/shared/createtranscriptionrequestfile.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createtranscriptionrequestmodel2.md b/docs/models/shared/createtranscriptionrequestmodel2.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createtranscriptionrequestresponseformat.md b/docs/models/shared/createtranscriptionrequestresponseformat.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createtranscriptionresponse.md b/docs/models/shared/createtranscriptionresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createtranslationrequest.md b/docs/models/shared/createtranslationrequest.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createtranslationrequestfile.md 
b/docs/models/shared/createtranslationrequestfile.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createtranslationrequestmodel2.md b/docs/models/shared/createtranslationrequestmodel2.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/createtranslationresponse.md b/docs/models/shared/createtranslationresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/deletefileresponse.md b/docs/models/shared/deletefileresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/deletemodelresponse.md b/docs/models/shared/deletemodelresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/finetune.md b/docs/models/shared/finetune.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/finetuneevent.md b/docs/models/shared/finetuneevent.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/finetunehyperparams.md b/docs/models/shared/finetunehyperparams.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/imagesresponse.md b/docs/models/shared/imagesresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/imagesresponsedata.md b/docs/models/shared/imagesresponsedata.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/listfilesresponse.md b/docs/models/shared/listfilesresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/listfinetuneeventsresponse.md b/docs/models/shared/listfinetuneeventsresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/listfinetunesresponse.md b/docs/models/shared/listfinetunesresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/listmodelsresponse.md b/docs/models/shared/listmodelsresponse.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/model.md b/docs/models/shared/model.md old mode 100644 new mode 100755 diff --git a/docs/models/shared/openaifile.md b/docs/models/shared/openaifile.md old mode 100644 new mode 
100755 diff --git a/docs/sdks/gpt/README.md b/docs/sdks/gpt/README.md old mode 100644 new mode 100755 diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md old mode 100644 new mode 100755 diff --git a/gen.yaml b/gen.yaml index 3d00423..13eb86f 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.64.0 - generationVersion: 2.71.0 + speakeasyVersion: 1.65.0 + generationVersion: 2.73.0 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.12.0 + version: 2.13.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/jest.config.js b/jest.config.js old mode 100644 new mode 100755 diff --git a/package-lock.json b/package-lock.json old mode 100644 new mode 100755 index e788ed4..884caa9 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.12.0", + "version": "2.13.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.12.0", + "version": "2.13.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json old mode 100644 new mode 100755 index 479f564..ffeaaaf --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.12.0", + "version": "2.13.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/index.ts b/src/index.ts old mode 100644 new mode 100755 diff --git a/src/internal/utils/contenttype.ts b/src/internal/utils/contenttype.ts old mode 100644 new mode 100755 diff --git a/src/internal/utils/headers.ts b/src/internal/utils/headers.ts old mode 100644 new mode 100755 diff --git a/src/internal/utils/index.ts b/src/internal/utils/index.ts old mode 100644 new mode 100755 diff --git 
a/src/internal/utils/pathparams.ts b/src/internal/utils/pathparams.ts old mode 100644 new mode 100755 diff --git a/src/internal/utils/queryparams.ts b/src/internal/utils/queryparams.ts old mode 100644 new mode 100755 diff --git a/src/internal/utils/requestbody.ts b/src/internal/utils/requestbody.ts old mode 100644 new mode 100755 diff --git a/src/internal/utils/retries.ts b/src/internal/utils/retries.ts old mode 100644 new mode 100755 diff --git a/src/internal/utils/security.ts b/src/internal/utils/security.ts old mode 100644 new mode 100755 diff --git a/src/internal/utils/utils.ts b/src/internal/utils/utils.ts old mode 100644 new mode 100755 diff --git a/src/sdk/index.ts b/src/sdk/index.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/errors/index.ts b/src/sdk/models/errors/index.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/errors/sdkerror.ts b/src/sdk/models/errors/sdkerror.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/cancelfinetune.ts b/src/sdk/models/operations/cancelfinetune.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createchatcompletion.ts b/src/sdk/models/operations/createchatcompletion.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createcompletion.ts b/src/sdk/models/operations/createcompletion.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createedit.ts b/src/sdk/models/operations/createedit.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createembedding.ts b/src/sdk/models/operations/createembedding.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createfile.ts b/src/sdk/models/operations/createfile.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createfinetune.ts b/src/sdk/models/operations/createfinetune.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createimage.ts b/src/sdk/models/operations/createimage.ts 
old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createimageedit.ts b/src/sdk/models/operations/createimageedit.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createimagevariation.ts b/src/sdk/models/operations/createimagevariation.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createmoderation.ts b/src/sdk/models/operations/createmoderation.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createtranscription.ts b/src/sdk/models/operations/createtranscription.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/createtranslation.ts b/src/sdk/models/operations/createtranslation.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/deletefile.ts b/src/sdk/models/operations/deletefile.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/deletemodel.ts b/src/sdk/models/operations/deletemodel.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/downloadfile.ts b/src/sdk/models/operations/downloadfile.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/index.ts b/src/sdk/models/operations/index.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/listfiles.ts b/src/sdk/models/operations/listfiles.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/listfinetuneevents.ts b/src/sdk/models/operations/listfinetuneevents.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/listfinetunes.ts b/src/sdk/models/operations/listfinetunes.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/listmodels.ts b/src/sdk/models/operations/listmodels.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/retrievefile.ts b/src/sdk/models/operations/retrievefile.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/retrievefinetune.ts 
b/src/sdk/models/operations/retrievefinetune.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/operations/retrievemodel.ts b/src/sdk/models/operations/retrievemodel.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/chatcompletionfunctions.ts b/src/sdk/models/shared/chatcompletionfunctions.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/chatcompletionrequestmessage.ts b/src/sdk/models/shared/chatcompletionrequestmessage.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/chatcompletionresponsemessage.ts b/src/sdk/models/shared/chatcompletionresponsemessage.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createchatcompletionresponse.ts b/src/sdk/models/shared/createchatcompletionresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createcompletionresponse.ts b/src/sdk/models/shared/createcompletionresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createeditrequest.ts b/src/sdk/models/shared/createeditrequest.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createeditresponse.ts b/src/sdk/models/shared/createeditresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createembeddingrequest.ts b/src/sdk/models/shared/createembeddingrequest.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createembeddingresponse.ts b/src/sdk/models/shared/createembeddingresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createfilerequest.ts b/src/sdk/models/shared/createfilerequest.ts old mode 100644 new mode 100755 diff --git 
a/src/sdk/models/shared/createfinetunerequest.ts b/src/sdk/models/shared/createfinetunerequest.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createimageeditrequest2.ts b/src/sdk/models/shared/createimageeditrequest2.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createimagerequest.ts b/src/sdk/models/shared/createimagerequest.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createimagevariationrequest2.ts b/src/sdk/models/shared/createimagevariationrequest2.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createmoderationrequest.ts b/src/sdk/models/shared/createmoderationrequest.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createmoderationresponse.ts b/src/sdk/models/shared/createmoderationresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createtranscriptionrequest1.ts b/src/sdk/models/shared/createtranscriptionrequest1.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createtranscriptionresponse.ts b/src/sdk/models/shared/createtranscriptionresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createtranslationrequest.ts b/src/sdk/models/shared/createtranslationrequest.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/createtranslationresponse.ts b/src/sdk/models/shared/createtranslationresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/deletefileresponse.ts b/src/sdk/models/shared/deletefileresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/deletemodelresponse.ts b/src/sdk/models/shared/deletemodelresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/finetune.ts b/src/sdk/models/shared/finetune.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/finetuneevent.ts b/src/sdk/models/shared/finetuneevent.ts old mode 100644 new mode 100755 diff --git 
a/src/sdk/models/shared/imagesresponse.ts b/src/sdk/models/shared/imagesresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/listfilesresponse.ts b/src/sdk/models/shared/listfilesresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/listfinetuneeventsresponse.ts b/src/sdk/models/shared/listfinetuneeventsresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/listfinetunesresponse.ts b/src/sdk/models/shared/listfinetunesresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/listmodelsresponse.ts b/src/sdk/models/shared/listmodelsresponse.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/model.ts b/src/sdk/models/shared/model.ts old mode 100644 new mode 100755 diff --git a/src/sdk/models/shared/openaifile.ts b/src/sdk/models/shared/openaifile.ts old mode 100644 new mode 100755 diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts old mode 100644 new mode 100755 diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts old mode 100644 new mode 100755 index cf6988f..6e2de0e --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.12.0"; - genVersion = "2.71.0"; + sdkVersion = "2.13.0"; + genVersion = "2.73.0"; public constructor(init?: Partial) { Object.assign(this, init); diff --git a/src/sdk/types/index.ts b/src/sdk/types/index.ts old mode 100644 new mode 100755 diff --git a/src/sdk/types/rfcdate.ts b/src/sdk/types/rfcdate.ts old mode 100644 new mode 100755 diff --git a/tsconfig.json b/tsconfig.json old mode 100644 new mode 100755 From b5f2bd0e1cbda9fbacf10674eee4a816f9a3c814 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 27 Jul 2023 01:01:41 +0000 Subject: [PATCH 29/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 
1.65.1 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 1bbcfe4..4c6a40a 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -372,4 +372,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.65.0 (2.73.0) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.13.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.13.0 - . \ No newline at end of file +- [NPM v2.13.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.13.0 - . + +## 2023-07-27 01:01:21 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.65.1 (2.73.1) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.13.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.13.1 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 13eb86f..50bc7e7 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.65.0 - generationVersion: 2.73.0 + speakeasyVersion: 1.65.1 + generationVersion: 2.73.1 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.13.0 + version: 2.13.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 884caa9..474817c 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.13.0", + "version": "2.13.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.13.0", + "version": "2.13.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index ffeaaaf..ce718a3 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.13.0", + "version": "2.13.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 6e2de0e..774b3af 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.13.0"; - genVersion = "2.73.0"; + sdkVersion = "2.13.1"; + genVersion = "2.73.1"; public constructor(init?: Partial) { Object.assign(this, init); From 38a44b43d4c88f9b1908782ab6de65cdf34249ac Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 28 Jul 2023 01:02:20 +0000 Subject: [PATCH 30/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.65.2 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- 
src/sdk/openai.ts | 24 ++++++++++++++++++++++++ src/sdk/sdk.ts | 4 ++-- 6 files changed, 41 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 4c6a40a..f2261ea 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -380,4 +380,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.65.1 (2.73.1) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.13.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.13.1 - . \ No newline at end of file +- [NPM v2.13.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.13.1 - . + +## 2023-07-28 01:01:57 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.65.2 (2.75.1) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.14.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.14.0 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 50bc7e7..cf2898d 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.65.1 - generationVersion: 2.73.1 + speakeasyVersion: 1.65.2 + generationVersion: 2.75.1 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.13.1 + version: 2.14.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 474817c..87fa08a 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.13.1", + "version": "2.14.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.13.1", + "version": "2.14.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json 
index ce718a3..6030c0b 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.13.1", + "version": "2.14.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index cd8d3f3..546fcc0 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -12,6 +12,7 @@ import { AxiosInstance, AxiosRequestConfig, AxiosResponse } from "axios"; /** * The OpenAI REST API */ + export class OpenAI { private sdkConfiguration: SDKConfiguration; @@ -41,6 +42,7 @@ export class OpenAI { const headers = { ...config?.headers }; headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -117,6 +119,7 @@ export class OpenAI { if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -198,6 +201,7 @@ export class OpenAI { if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -280,6 +284,7 @@ export class OpenAI { if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} 
${this.sdkConfiguration.openapiDocVersion}`; @@ -360,6 +365,7 @@ export class OpenAI { if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -441,6 +447,7 @@ export class OpenAI { if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -523,6 +530,7 @@ export class OpenAI { if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -600,6 +608,7 @@ export class OpenAI { if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -680,6 +689,7 @@ export class OpenAI { if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -760,6 +770,7 @@ export class OpenAI { if (reqBody == 
null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -841,6 +852,7 @@ export class OpenAI { if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -921,6 +933,7 @@ export class OpenAI { if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -1002,6 +1015,7 @@ export class OpenAI { if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -1070,6 +1084,7 @@ export class OpenAI { const headers = { ...config?.headers }; headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -1137,6 +1152,7 @@ export class OpenAI { const headers = { ...config?.headers }; headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} 
${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -1204,6 +1220,7 @@ export class OpenAI { const headers = { ...config?.headers }; headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -1261,6 +1278,7 @@ export class OpenAI { const headers = { ...config?.headers }; headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -1330,6 +1348,7 @@ export class OpenAI { const headers = { ...config?.headers }; const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -1392,6 +1411,7 @@ export class OpenAI { const headers = { ...config?.headers }; headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -1452,6 +1472,7 @@ export class OpenAI { const headers = { ...config?.headers }; headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -1519,6 +1540,7 @@ export class OpenAI { const headers = { ...config?.headers }; headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} 
${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -1586,6 +1608,7 @@ export class OpenAI { const headers = { ...config?.headers }; headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; @@ -1650,6 +1673,7 @@ export class OpenAI { const headers = { ...config?.headers }; headers["Accept"] = "application/json"; + headers[ "user-agent" ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 774b3af..a066aef 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.13.1"; - genVersion = "2.73.1"; + sdkVersion = "2.14.0"; + genVersion = "2.75.1"; public constructor(init?: Partial) { Object.assign(this, init); From b2b2bc042dddea589d6a8e4346b8838af56bd2c0 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 1 Aug 2023 01:10:08 +0000 Subject: [PATCH 31/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.66.1 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index f2261ea..2892f7b 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -388,4 +388,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.65.2 (2.75.1) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.14.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.14.0 - . 
\ No newline at end of file +- [NPM v2.14.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.14.0 - . + +## 2023-08-01 01:09:49 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.66.1 (2.75.2) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.14.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.14.1 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index cf2898d..ecce064 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.65.2 - generationVersion: 2.75.1 + speakeasyVersion: 1.66.1 + generationVersion: 2.75.2 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.14.0 + version: 2.14.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 87fa08a..b1bba02 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.14.0", + "version": "2.14.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.14.0", + "version": "2.14.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 6030c0b..b33799b 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.14.0", + "version": "2.14.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index a066aef..7110a36 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.14.0"; - genVersion = 
"2.75.1"; + sdkVersion = "2.14.1"; + genVersion = "2.75.2"; public constructor(init?: Partial) { Object.assign(this, init); From f2265dcd736af9e1d56f2420c95a6838e310ecad Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 3 Aug 2023 01:04:07 +0000 Subject: [PATCH 32/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.68.1 --- RELEASES.md | 10 +++++++++- docs/sdks/openai/README.md | 2 +- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/openai.ts | 2 +- src/sdk/sdk.ts | 4 ++-- 7 files changed, 19 insertions(+), 11 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 2892f7b..e05afa4 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -396,4 +396,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.66.1 (2.75.2) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.14.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.14.1 - . \ No newline at end of file +- [NPM v2.14.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.14.1 - . + +## 2023-08-03 01:03:42 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.68.1 (2.77.1) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.15.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.15.0 - . \ No newline at end of file diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index fca8bcb..8d8ac54 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -244,7 +244,7 @@ sdk.openAI.createCompletion({ Creates a new edit for the provided input, instruction, and parameters. -> :warning: **DEPRECATED**: this method will be removed in a future release, please migrate away from it as soon as possible. +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. 
### Example Usage diff --git a/gen.yaml b/gen.yaml index ecce064..bd8ac60 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.66.1 - generationVersion: 2.75.2 + speakeasyVersion: 1.68.1 + generationVersion: 2.77.1 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.14.1 + version: 2.15.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index b1bba02..a46e2db 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.14.1", + "version": "2.15.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.14.1", + "version": "2.15.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index b33799b..4ada516 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.14.1", + "version": "2.15.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index 546fcc0..0797288 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -252,7 +252,7 @@ export class OpenAI { /** * Creates a new edit for the provided input, instruction, and parameters. * - * @deprecated this method will be removed in a future release, please migrate away from it as soon as possible + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. 
*/ async createEdit( req: shared.CreateEditRequest, diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 7110a36..7b3943a 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.14.1"; - genVersion = "2.75.2"; + sdkVersion = "2.15.0"; + genVersion = "2.77.1"; public constructor(init?: Partial) { Object.assign(this, init); From f4e816e42c39a66d25283bab83e758b082fb472e Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 4 Aug 2023 01:05:48 +0000 Subject: [PATCH 33/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.68.3 --- RELEASES.md | 10 +++++++++- gen.yaml | 6 +++--- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 17 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index e05afa4..ee87b77 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -404,4 +404,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.68.1 (2.77.1) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.15.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.15.0 - . \ No newline at end of file +- [NPM v2.15.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.15.0 - . + +## 2023-08-04 01:05:28 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.68.3 (2.81.1) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.16.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.16.0 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index bd8ac60..f0f0240 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,15 @@ configVersion: 1.0.0 management: docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.68.1 - generationVersion: 2.77.1 + speakeasyVersion: 1.68.3 + generationVersion: 2.81.1 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false typescript: - version: 2.15.0 + version: 2.16.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index a46e2db..17b4fbc 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.15.0", + "version": "2.16.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.15.0", + "version": "2.16.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 4ada516..b4320b0 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.15.0", + "version": "2.16.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 7b3943a..41b124a 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.15.0"; - genVersion = "2.77.1"; + sdkVersion = "2.16.0"; + genVersion = "2.81.1"; public constructor(init?: Partial) { Object.assign(this, init); From ab0c22da0a19d61279c51863ffe5f56478134e37 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 8 Aug 2023 01:02:14 +0000 Subject: [PATCH 34/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.69.1 --- RELEASES.md | 10 +++++++++- gen.yaml | 11 ++++++++--- package-lock.json | 4 ++-- package.json | 2 
+- src/sdk/sdk.ts | 4 ++-- 5 files changed, 22 insertions(+), 9 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index ee87b77..e140f09 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -412,4 +412,12 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.68.3 (2.81.1) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.16.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.16.0 - . \ No newline at end of file +- [NPM v2.16.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.16.0 - . + +## 2023-08-08 01:01:55 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.69.1 (2.82.0) https://github.com/speakeasy-api/speakeasy +### Releases +- [NPM v2.17.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.17.0 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index f0f0240..c0b1eff 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,15 +2,20 @@ configVersion: 1.0.0 management: docChecksum: 60758465b46e16e9341d0f17cc2819bd docVersion: 2.0.0 - speakeasyVersion: 1.68.3 - generationVersion: 2.81.1 + speakeasyVersion: 1.69.1 + generationVersion: 2.82.0 generation: sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false +features: + typescript: + core: 2.82.0 + deprecations: 2.81.1 + globalServerURLs: 2.81.1 typescript: - version: 2.16.0 + version: 2.17.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 17b4fbc..b6f6e98 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.16.0", + "version": "2.17.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.16.0", + "version": "2.17.0", "dependencies": { "axios": "^1.1.3", "class-transformer": 
"^0.5.1", diff --git a/package.json b/package.json index b4320b0..585454e 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.16.0", + "version": "2.17.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 41b124a..a2807d7 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.16.0"; - genVersion = "2.81.1"; + sdkVersion = "2.17.0"; + genVersion = "2.82.0"; public constructor(init?: Partial) { Object.assign(this, init); From a78ace66b40b9b07d5b19bcec398b8e753bf8429 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 11 Aug 2023 00:52:39 +0000 Subject: [PATCH 35/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.71.0 --- RELEASES.md | 12 +++++++++++- docs/models/shared/chatcompletionresponsemessage.md | 2 +- .../chatcompletionresponsemessagefunctioncall.md | 4 ++-- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- .../models/shared/chatcompletionresponsemessage.ts | 6 +++--- src/sdk/sdk.ts | 4 ++-- 8 files changed, 26 insertions(+), 16 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index e140f09..2c1fe55 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -420,4 +420,14 @@ Based on: - OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml - Speakeasy CLI 1.69.1 (2.82.0) https://github.com/speakeasy-api/speakeasy ### Releases -- [NPM v2.17.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.17.0 - . \ No newline at end of file +- [NPM v2.17.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.17.0 - . 
+ +## 2023-08-11 00:52:11 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.71.0 (2.83.3) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.17.1] . +### Releases +- [NPM v2.17.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.17.1 - . \ No newline at end of file diff --git a/docs/models/shared/chatcompletionresponsemessage.md b/docs/models/shared/chatcompletionresponsemessage.md index 73d7345..9f8ac6c 100755 --- a/docs/models/shared/chatcompletionresponsemessage.md +++ b/docs/models/shared/chatcompletionresponsemessage.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -| `content` | *string* | :heavy_minus_sign: | The contents of the message. | +| `content` | *string* | :heavy_check_mark: | The contents of the message. | | `functionCall` | [ChatCompletionResponseMessageFunctionCall](../../models/shared/chatcompletionresponsemessagefunctioncall.md) | :heavy_minus_sign: | The name and arguments of a function that should be called, as generated by the model. | | `role` | [ChatCompletionResponseMessageRole](../../models/shared/chatcompletionresponsemessagerole.md) | :heavy_check_mark: | The role of the author of this message. 
| \ No newline at end of file diff --git a/docs/models/shared/chatcompletionresponsemessagefunctioncall.md b/docs/models/shared/chatcompletionresponsemessagefunctioncall.md index bd9ac54..618c861 100755 --- a/docs/models/shared/chatcompletionresponsemessagefunctioncall.md +++ b/docs/models/shared/chatcompletionresponsemessagefunctioncall.md @@ -7,5 +7,5 @@ The name and arguments of a function that should be called, as generated by the | Field | Type | Required | Description | | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `arguments` | *string* | :heavy_minus_sign: | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | -| `name` | *string* | :heavy_minus_sign: | The name of the function to call. 
| \ No newline at end of file +| `arguments` | *string* | :heavy_check_mark: | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | +| `name` | *string* | :heavy_check_mark: | The name of the function to call. | \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index c0b1eff..f966c88 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: 60758465b46e16e9341d0f17cc2819bd + docChecksum: 679eee9640a500ecd4a9a1b0e8a56da3 docVersion: 2.0.0 - speakeasyVersion: 1.69.1 - generationVersion: 2.82.0 + speakeasyVersion: 1.71.0 + generationVersion: 2.83.3 generation: sdkClassName: gpt sdkFlattening: true @@ -15,7 +15,7 @@ features: deprecations: 2.81.1 globalServerURLs: 2.81.1 typescript: - version: 2.17.0 + version: 2.17.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index b6f6e98..4b51ad2 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.17.0", + "version": "2.17.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.17.0", + "version": "2.17.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 585454e..588f11c 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.17.0", + "version": "2.17.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/chatcompletionresponsemessage.ts b/src/sdk/models/shared/chatcompletionresponsemessage.ts index 74139ec..ae5ed14 100755 --- 
a/src/sdk/models/shared/chatcompletionresponsemessage.ts +++ b/src/sdk/models/shared/chatcompletionresponsemessage.ts @@ -14,14 +14,14 @@ export class ChatCompletionResponseMessageFunctionCall extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "arguments" }) - arguments?: string; + arguments: string; /** * The name of the function to call. */ @SpeakeasyMetadata() @Expose({ name: "name" }) - name?: string; + name: string; } /** @@ -40,7 +40,7 @@ export class ChatCompletionResponseMessage extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "content" }) - content?: string; + content: string; /** * The name and arguments of a function that should be called, as generated by the model. diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index a2807d7..7d61854 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.17.0"; - genVersion = "2.82.0"; + sdkVersion = "2.17.1"; + genVersion = "2.83.3"; public constructor(init?: Partial) { Object.assign(this, init); From 916e170dbc5bc2d90aa9a1bef9588211f123b4d1 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sun, 13 Aug 2023 00:55:38 +0000 Subject: [PATCH 36/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.71.0 --- RELEASES.md | 12 ++- .../shared/chatcompletionresponsemessage.md | 2 + docs/models/shared/completionusage.md | 12 +++ .../shared/createchatcompletionresponse.md | 14 +-- .../createchatcompletionresponsechoices.md | 10 +- ...atcompletionresponsechoicesfinishreason.md | 4 + .../createchatcompletionresponseusage.md | 10 -- .../models/shared/createcompletionresponse.md | 15 +-- .../shared/createcompletionresponsechoices.md | 12 +-- ...tecompletionresponsechoicesfinishreason.md | 4 + .../shared/createcompletionresponseusage.md | 10 -- docs/models/shared/createeditresponse.md | 8 +- .../shared/createeditresponsechoices.md | 10 +- 
.../createeditresponsechoicesfinishreason.md | 4 + docs/models/shared/createeditresponseusage.md | 10 -- docs/models/shared/createembeddingresponse.md | 8 +- .../shared/createembeddingresponsedata.md | 10 -- .../shared/createembeddingresponseusage.md | 10 +- .../models/shared/createmoderationresponse.md | 8 +- .../shared/createmoderationresponseresults.md | 6 +- ...eatemoderationresponseresultscategories.md | 20 ++-- ...moderationresponseresultscategoryscores.md | 20 ++-- .../shared/createtranscriptionrequest1.md | 2 +- .../models/shared/createtranslationrequest.md | 2 +- docs/models/shared/embedding.md | 13 +++ docs/models/shared/finetune.md | 33 +++---- docs/models/shared/finetunehyperparams.md | 20 ++-- docs/models/shared/image.md | 11 +++ docs/models/shared/imagesresponse.md | 8 +- docs/models/shared/imagesresponsedata.md | 9 -- docs/models/shared/model.md | 14 +-- docs/models/shared/openaifile.md | 23 ++--- files.gen | 11 ++- gen.yaml | 4 +- package-lock.json | 4 +- package.json | 2 +- .../shared/chatcompletionresponsemessage.ts | 3 + src/sdk/models/shared/completionusage.ts | 32 +++++++ .../shared/createchatcompletionresponse.ts | 59 ++++++++---- .../models/shared/createcompletionresponse.ts | 56 ++++++++---- src/sdk/models/shared/createeditresponse.ts | 51 +++++++---- .../models/shared/createembeddingresponse.ts | 42 +++++---- .../models/shared/createmoderationresponse.ts | 68 +++++++++++++- .../shared/createtranscriptionrequest1.ts | 2 +- .../models/shared/createtranslationrequest.ts | 2 +- src/sdk/models/shared/embedding.ts | 38 ++++++++ src/sdk/models/shared/finetune.ts | 91 ++++++++++++++++++- src/sdk/models/shared/image.ts | 25 +++++ src/sdk/models/shared/imagesresponse.ts | 17 +--- src/sdk/models/shared/index.ts | 3 + src/sdk/models/shared/model.ts | 14 ++- src/sdk/models/shared/openaifile.ts | 32 ++++++- src/sdk/sdk.ts | 2 +- 53 files changed, 649 insertions(+), 263 deletions(-) create mode 100755 docs/models/shared/completionusage.md delete mode 
100755 docs/models/shared/createchatcompletionresponseusage.md delete mode 100755 docs/models/shared/createcompletionresponseusage.md delete mode 100755 docs/models/shared/createeditresponseusage.md delete mode 100755 docs/models/shared/createembeddingresponsedata.md create mode 100755 docs/models/shared/embedding.md create mode 100755 docs/models/shared/image.md delete mode 100755 docs/models/shared/imagesresponsedata.md create mode 100755 src/sdk/models/shared/completionusage.ts create mode 100755 src/sdk/models/shared/embedding.ts create mode 100755 src/sdk/models/shared/image.ts diff --git a/RELEASES.md b/RELEASES.md index 2c1fe55..33d1fd5 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -430,4 +430,14 @@ Based on: ### Generated - [typescript v2.17.1] . ### Releases -- [NPM v2.17.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.17.1 - . \ No newline at end of file +- [NPM v2.17.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.17.1 - . + +## 2023-08-13 00:55:17 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.71.0 (2.83.3) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.17.2] . +### Releases +- [NPM v2.17.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.17.2 - . \ No newline at end of file diff --git a/docs/models/shared/chatcompletionresponsemessage.md b/docs/models/shared/chatcompletionresponsemessage.md index 9f8ac6c..794904e 100755 --- a/docs/models/shared/chatcompletionresponsemessage.md +++ b/docs/models/shared/chatcompletionresponsemessage.md @@ -1,5 +1,7 @@ # ChatCompletionResponseMessage +A chat completion message generated by the model. 
+ ## Fields diff --git a/docs/models/shared/completionusage.md b/docs/models/shared/completionusage.md new file mode 100755 index 0000000..7e3a83a --- /dev/null +++ b/docs/models/shared/completionusage.md @@ -0,0 +1,12 @@ +# CompletionUsage + +Usage statistics for the completion request. + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | +| `completionTokens` | *number* | :heavy_check_mark: | Number of tokens in the generated completion. | +| `promptTokens` | *number* | :heavy_check_mark: | Number of tokens in the prompt. | +| `totalTokens` | *number* | :heavy_check_mark: | Total number of tokens used in the request (prompt + completion). | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionresponse.md b/docs/models/shared/createchatcompletionresponse.md index 14b4646..ac6bda0 100755 --- a/docs/models/shared/createchatcompletionresponse.md +++ b/docs/models/shared/createchatcompletionresponse.md @@ -1,15 +1,15 @@ # CreateChatCompletionResponse -OK +Represents a chat completion response returned by model, based on the provided input. 
## Fields | Field | Type | Required | Description | | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | -| `choices` | [CreateChatCompletionResponseChoices](../../models/shared/createchatcompletionresponsechoices.md)[] | :heavy_check_mark: | N/A | -| `created` | *number* | :heavy_check_mark: | N/A | -| `id` | *string* | :heavy_check_mark: | N/A | -| `model` | *string* | :heavy_check_mark: | N/A | -| `object` | *string* | :heavy_check_mark: | N/A | -| `usage` | [CreateChatCompletionResponseUsage](../../models/shared/createchatcompletionresponseusage.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `choices` | [CreateChatCompletionResponseChoices](../../models/shared/createchatcompletionresponsechoices.md)[] | :heavy_check_mark: | A list of chat completion choices. Can be more than one if `n` is greater than 1. | +| `created` | *number* | :heavy_check_mark: | A unix timestamp of when the chat completion was created. | +| `id` | *string* | :heavy_check_mark: | A unique identifier for the chat completion. | +| `model` | *string* | :heavy_check_mark: | The model used for the chat completion. | +| `object` | *string* | :heavy_check_mark: | The object type, which is always `chat.completion`. | +| `usage` | [CompletionUsage](../../models/shared/completionusage.md) | :heavy_minus_sign: | Usage statistics for the completion request. 
| \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionresponsechoices.md b/docs/models/shared/createchatcompletionresponsechoices.md index 2f12b62..0400387 100755 --- a/docs/models/shared/createchatcompletionresponsechoices.md +++ b/docs/models/shared/createchatcompletionresponsechoices.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | -| `finishReason` | [CreateChatCompletionResponseChoicesFinishReason](../../models/shared/createchatcompletionresponsechoicesfinishreason.md) | :heavy_check_mark: | N/A | -| `index` | *number* | :heavy_check_mark: | N/A | -| `message` | [ChatCompletionResponseMessage](../../models/shared/chatcompletionresponsemessage.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `finishReason` | [CreateChatCompletionResponseChoicesFinishReason](../../models/shared/createchatcompletionresponsechoicesfinishreason.md) | :heavy_check_mark: | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
`length` if the maximum number of tokens specified in the request was reached, or `function_call` if the model called a function.
| +| `index` | *number* | :heavy_check_mark: | The index of the choice in the list of choices. | +| `message` | [ChatCompletionResponseMessage](../../models/shared/chatcompletionresponsemessage.md) | :heavy_check_mark: | A chat completion message generated by the model. | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md b/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md index 74f5f7d..eafea0f 100755 --- a/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md +++ b/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md @@ -1,5 +1,9 @@ # CreateChatCompletionResponseChoicesFinishReason +The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, +`length` if the maximum number of tokens specified in the request was reached, or `function_call` if the model called a function. + + ## Values diff --git a/docs/models/shared/createchatcompletionresponseusage.md b/docs/models/shared/createchatcompletionresponseusage.md deleted file mode 100755 index 9aaef97..0000000 --- a/docs/models/shared/createchatcompletionresponseusage.md +++ /dev/null @@ -1,10 +0,0 @@ -# CreateChatCompletionResponseUsage - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `completionTokens` | *number* | :heavy_check_mark: | N/A | -| `promptTokens` | *number* | :heavy_check_mark: | N/A | -| `totalTokens` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponse.md b/docs/models/shared/createcompletionresponse.md index 7607489..643c404 100755 --- a/docs/models/shared/createcompletionresponse.md +++ b/docs/models/shared/createcompletionresponse.md @@ -1,15 +1,16 @@ # CreateCompletionResponse -OK +Represents a completion response from the API. 
Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + ## Fields | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | -| `choices` | [CreateCompletionResponseChoices](../../models/shared/createcompletionresponsechoices.md)[] | :heavy_check_mark: | N/A | -| `created` | *number* | :heavy_check_mark: | N/A | -| `id` | *string* | :heavy_check_mark: | N/A | -| `model` | *string* | :heavy_check_mark: | N/A | -| `object` | *string* | :heavy_check_mark: | N/A | -| `usage` | [CreateCompletionResponseUsage](../../models/shared/createcompletionresponseusage.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `choices` | [CreateCompletionResponseChoices](../../models/shared/createcompletionresponsechoices.md)[] | :heavy_check_mark: | The list of completion choices the model generated for the input prompt. | +| `created` | *number* | :heavy_check_mark: | The Unix timestamp of when the completion was created. | +| `id` | *string* | :heavy_check_mark: | A unique identifier for the completion. | +| `model` | *string* | :heavy_check_mark: | The model used for completion. | +| `object` | *string* | :heavy_check_mark: | The object type, which is always "text_completion" | +| `usage` | [CompletionUsage](../../models/shared/completionusage.md) | :heavy_minus_sign: | Usage statistics for the completion request. 
| \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoices.md b/docs/models/shared/createcompletionresponsechoices.md index 41858e2..2445ca8 100755 --- a/docs/models/shared/createcompletionresponsechoices.md +++ b/docs/models/shared/createcompletionresponsechoices.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | -| `finishReason` | [CreateCompletionResponseChoicesFinishReason](../../models/shared/createcompletionresponsechoicesfinishreason.md) | :heavy_check_mark: | N/A | -| `index` | *number* | :heavy_check_mark: | N/A | -| `logprobs` | [CreateCompletionResponseChoicesLogprobs](../../models/shared/createcompletionresponsechoiceslogprobs.md) | :heavy_check_mark: | N/A | -| `text` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `finishReason` | [CreateCompletionResponseChoicesFinishReason](../../models/shared/createcompletionresponsechoicesfinishreason.md) | :heavy_check_mark: | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
or `length` if the maximum number of tokens specified in the request was reached.
| +| `index` | *number* | :heavy_check_mark: | N/A | +| `logprobs` | [CreateCompletionResponseChoicesLogprobs](../../models/shared/createcompletionresponsechoiceslogprobs.md) | :heavy_check_mark: | N/A | +| `text` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoicesfinishreason.md b/docs/models/shared/createcompletionresponsechoicesfinishreason.md index 18d1a23..34b66fe 100755 --- a/docs/models/shared/createcompletionresponsechoicesfinishreason.md +++ b/docs/models/shared/createcompletionresponsechoicesfinishreason.md @@ -1,5 +1,9 @@ # CreateCompletionResponseChoicesFinishReason +The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, +or `length` if the maximum number of tokens specified in the request was reached. + + ## Values diff --git a/docs/models/shared/createcompletionresponseusage.md b/docs/models/shared/createcompletionresponseusage.md deleted file mode 100755 index e124187..0000000 --- a/docs/models/shared/createcompletionresponseusage.md +++ /dev/null @@ -1,10 +0,0 @@ -# CreateCompletionResponseUsage - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `completionTokens` | *number* | :heavy_check_mark: | N/A | -| `promptTokens` | *number* | :heavy_check_mark: | N/A | -| `totalTokens` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createeditresponse.md b/docs/models/shared/createeditresponse.md index b69aff7..2b3d57f 100755 --- a/docs/models/shared/createeditresponse.md +++ b/docs/models/shared/createeditresponse.md @@ -7,7 +7,7 @@ OK | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -| `choices` | [CreateEditResponseChoices](../../models/shared/createeditresponsechoices.md)[] | :heavy_check_mark: | N/A | -| `created` | *number* | :heavy_check_mark: | N/A | -| `object` | *string* | :heavy_check_mark: | N/A | -| `usage` | [CreateEditResponseUsage](../../models/shared/createeditresponseusage.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| `choices` | [CreateEditResponseChoices](../../models/shared/createeditresponsechoices.md)[] | :heavy_check_mark: | A list of edit choices. Can be more than one if `n` is greater than 1. | +| `created` | *number* | :heavy_check_mark: | A unix timestamp of when the edit was created. | +| `object` | *string* | :heavy_check_mark: | The object type, which is always `edit`. | +| `usage` | [CompletionUsage](../../models/shared/completionusage.md) | :heavy_check_mark: | Usage statistics for the completion request. 
| \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoices.md b/docs/models/shared/createeditresponsechoices.md index 7f5e22a..8c90e20 100755 --- a/docs/models/shared/createeditresponsechoices.md +++ b/docs/models/shared/createeditresponsechoices.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | -| `finishReason` | [CreateEditResponseChoicesFinishReason](../../models/shared/createeditresponsechoicesfinishreason.md) | :heavy_check_mark: | N/A | -| `index` | *number* | :heavy_check_mark: | N/A | -| `text` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `finishReason` | 
[CreateEditResponseChoicesFinishReason](../../models/shared/createeditresponsechoicesfinishreason.md) | :heavy_check_mark: | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
or `length` if the maximum number of tokens specified in the request was reached.
| +| `index` | *number* | :heavy_check_mark: | The index of the choice in the list of choices. | +| `text` | *string* | :heavy_check_mark: | The edited result. | \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoicesfinishreason.md b/docs/models/shared/createeditresponsechoicesfinishreason.md index 74b87d8..9257539 100755 --- a/docs/models/shared/createeditresponsechoicesfinishreason.md +++ b/docs/models/shared/createeditresponsechoicesfinishreason.md @@ -1,5 +1,9 @@ # CreateEditResponseChoicesFinishReason +The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, +or `length` if the maximum number of tokens specified in the request was reached. + + ## Values diff --git a/docs/models/shared/createeditresponseusage.md b/docs/models/shared/createeditresponseusage.md deleted file mode 100755 index 00c1a10..0000000 --- a/docs/models/shared/createeditresponseusage.md +++ /dev/null @@ -1,10 +0,0 @@ -# CreateEditResponseUsage - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `completionTokens` | *number* | :heavy_check_mark: | N/A | -| `promptTokens` | *number* | :heavy_check_mark: | N/A | -| `totalTokens` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createembeddingresponse.md b/docs/models/shared/createembeddingresponse.md index 56cc317..18cc762 100755 --- a/docs/models/shared/createembeddingresponse.md +++ b/docs/models/shared/createembeddingresponse.md @@ -7,7 +7,7 @@ OK | Field | Type | Required | Description | | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------- | -| `data` | [CreateEmbeddingResponseData](../../models/shared/createembeddingresponsedata.md)[] | :heavy_check_mark: | N/A | -| `model` | *string* | :heavy_check_mark: | N/A | -| `object` | *string* | :heavy_check_mark: | N/A | -| `usage` | [CreateEmbeddingResponseUsage](../../models/shared/createembeddingresponseusage.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| `data` | [Embedding](../../models/shared/embedding.md)[] | :heavy_check_mark: | The list of embeddings generated by the model. | +| `model` | *string* | :heavy_check_mark: | The name of the model used to generate the embedding. | +| `object` | *string* | :heavy_check_mark: | The object type, which is always "embedding". | +| `usage` | [CreateEmbeddingResponseUsage](../../models/shared/createembeddingresponseusage.md) | :heavy_check_mark: | The usage information for the request. | \ No newline at end of file diff --git a/docs/models/shared/createembeddingresponsedata.md b/docs/models/shared/createembeddingresponsedata.md deleted file mode 100755 index e819885..0000000 --- a/docs/models/shared/createembeddingresponsedata.md +++ /dev/null @@ -1,10 +0,0 @@ -# CreateEmbeddingResponseData - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `embedding` | *number*[] | :heavy_check_mark: | N/A | -| `index` | *number* | :heavy_check_mark: | N/A | -| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createembeddingresponseusage.md b/docs/models/shared/createembeddingresponseusage.md index d633cc4..3539320 100755 --- a/docs/models/shared/createembeddingresponseusage.md +++ b/docs/models/shared/createembeddingresponseusage.md @@ -1,9 +1,11 @@ # CreateEmbeddingResponseUsage +The usage information for the request. 
+ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `promptTokens` | *number* | :heavy_check_mark: | N/A | -| `totalTokens` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------- | ----------------------------------------------- | ----------------------------------------------- | ----------------------------------------------- | +| `promptTokens` | *number* | :heavy_check_mark: | The number of tokens used by the prompt. | +| `totalTokens` | *number* | :heavy_check_mark: | The total number of tokens used by the request. | \ No newline at end of file diff --git a/docs/models/shared/createmoderationresponse.md b/docs/models/shared/createmoderationresponse.md index 76624f1..8717114 100755 --- a/docs/models/shared/createmoderationresponse.md +++ b/docs/models/shared/createmoderationresponse.md @@ -1,12 +1,12 @@ # CreateModerationResponse -OK +Represents policy compliance report by OpenAI's content moderation model against a given input. ## Fields | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | -| `id` | *string* | :heavy_check_mark: | N/A | -| `model` | *string* | :heavy_check_mark: | N/A | -| `results` | [CreateModerationResponseResults](../../models/shared/createmoderationresponseresults.md)[] | :heavy_check_mark: | N/A | \ No newline at end of file +| `id` | *string* | :heavy_check_mark: | The unique identifier for the moderation request. 
| +| `model` | *string* | :heavy_check_mark: | The model used to generate the moderation results. | +| `results` | [CreateModerationResponseResults](../../models/shared/createmoderationresponseresults.md)[] | :heavy_check_mark: | A list of moderation objects. | \ No newline at end of file diff --git a/docs/models/shared/createmoderationresponseresults.md b/docs/models/shared/createmoderationresponseresults.md index 111730c..4dc296e 100755 --- a/docs/models/shared/createmoderationresponseresults.md +++ b/docs/models/shared/createmoderationresponseresults.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | -| `categories` | [CreateModerationResponseResultsCategories](../../models/shared/createmoderationresponseresultscategories.md) | :heavy_check_mark: | N/A | -| `categoryScores` | [CreateModerationResponseResultsCategoryScores](../../models/shared/createmoderationresponseresultscategoryscores.md) | :heavy_check_mark: | N/A | -| `flagged` | *boolean* | :heavy_check_mark: | N/A | \ No newline at end of file +| `categories` | [CreateModerationResponseResultsCategories](../../models/shared/createmoderationresponseresultscategories.md) | :heavy_check_mark: | A list of the categories, and whether they are flagged or not. | +| `categoryScores` | [CreateModerationResponseResultsCategoryScores](../../models/shared/createmoderationresponseresultscategoryscores.md) | :heavy_check_mark: | A list of the categories along with their scores as predicted by model. 
| +| `flagged` | *boolean* | :heavy_check_mark: | Whether the content violates [OpenAI's usage policies](/policies/usage-policies). | \ No newline at end of file diff --git a/docs/models/shared/createmoderationresponseresultscategories.md b/docs/models/shared/createmoderationresponseresultscategories.md index b965612..73fa1d6 100755 --- a/docs/models/shared/createmoderationresponseresultscategories.md +++ b/docs/models/shared/createmoderationresponseresultscategories.md @@ -1,14 +1,16 @@ # CreateModerationResponseResultsCategories +A list of the categories, and whether they are flagged or not. + ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `hate` | *boolean* | :heavy_check_mark: | N/A | -| `hateThreatening` | *boolean* | :heavy_check_mark: | N/A | -| `selfHarm` | *boolean* | :heavy_check_mark: | N/A | -| `sexual` | *boolean* | :heavy_check_mark: | N/A | -| `sexualMinors` | *boolean* | :heavy_check_mark: | N/A | -| `violence` | *boolean* | :heavy_check_mark: | N/A | -| `violenceGraphic` | *boolean* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | +| `hate` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'hate'. | +| `hateThreatening` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'hate/threatening'. | +| `selfHarm` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'self-harm'. | +| `sexual` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'sexual'. | +| `sexualMinors` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'sexual/minors'. 
| +| `violence` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'violence'. | +| `violenceGraphic` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'violence/graphic'. | \ No newline at end of file diff --git a/docs/models/shared/createmoderationresponseresultscategoryscores.md b/docs/models/shared/createmoderationresponseresultscategoryscores.md index f88d830..0681fd9 100755 --- a/docs/models/shared/createmoderationresponseresultscategoryscores.md +++ b/docs/models/shared/createmoderationresponseresultscategoryscores.md @@ -1,14 +1,16 @@ # CreateModerationResponseResultsCategoryScores +A list of the categories along with their scores as predicted by model. + ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `hate` | *number* | :heavy_check_mark: | N/A | -| `hateThreatening` | *number* | :heavy_check_mark: | N/A | -| `selfHarm` | *number* | :heavy_check_mark: | N/A | -| `sexual` | *number* | :heavy_check_mark: | N/A | -| `sexualMinors` | *number* | :heavy_check_mark: | N/A | -| `violence` | *number* | :heavy_check_mark: | N/A | -| `violenceGraphic` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `hate` | *number* | :heavy_check_mark: | The score for the category 'hate'. | +| `hateThreatening` | *number* | :heavy_check_mark: | The score for the category 'hate/threatening'. | +| `selfHarm` | *number* | :heavy_check_mark: | The score for the category 'self-harm'. | +| `sexual` | *number* | :heavy_check_mark: | The score for the category 'sexual'. | +| `sexualMinors` | *number* | :heavy_check_mark: | The score for the category 'sexual/minors'. 
| +| `violence` | *number* | :heavy_check_mark: | The score for the category 'violence'. | +| `violenceGraphic` | *number* | :heavy_check_mark: | The score for the category 'violence/graphic'. | \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequest1.md b/docs/models/shared/createtranscriptionrequest1.md index 8c46750..6d83ce2 100755 --- a/docs/models/shared/createtranscriptionrequest1.md +++ b/docs/models/shared/createtranscriptionrequest1.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `file` | 
[CreateTranscriptionRequestFile](../../models/shared/createtranscriptionrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
| +| `file` | [CreateTranscriptionRequestFile](../../models/shared/createtranscriptionrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
| | `language` | *string* | :heavy_minus_sign: | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
| | `model` | *any* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| | `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
| diff --git a/docs/models/shared/createtranslationrequest.md b/docs/models/shared/createtranslationrequest.md index c870dd8..68e4c52 100755 --- a/docs/models/shared/createtranslationrequest.md +++ b/docs/models/shared/createtranslationrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `file` | [CreateTranslationRequestFile](../../models/shared/createtranslationrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
| +| `file` | [CreateTranslationRequestFile](../../models/shared/createtranslationrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
| | `model` | *any* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| | `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
| | `responseFormat` | *string* | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| diff --git a/docs/models/shared/embedding.md b/docs/models/shared/embedding.md new file mode 100755 index 0000000..a13ff86 --- /dev/null +++ b/docs/models/shared/embedding.md @@ -0,0 +1,13 @@ +# Embedding + +Represents an embedding vector returned by embedding endpoint. + + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `embedding` | *number*[] | :heavy_check_mark: | The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings).
| +| `index` | *number* | :heavy_check_mark: | The index of the embedding in the list of embeddings. | +| `object` | *string* | :heavy_check_mark: | The object type, which is always "embedding". | \ No newline at end of file diff --git a/docs/models/shared/finetune.md b/docs/models/shared/finetune.md index ee00252..964eadd 100755 --- a/docs/models/shared/finetune.md +++ b/docs/models/shared/finetune.md @@ -1,22 +1,23 @@ # FineTune -OK +The `FineTune` object represents a fine-tuning job that has been created through the API. + ## Fields -| Field | Type | Required | Description | -| ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | -| `createdAt` | *number* | :heavy_check_mark: | N/A | -| `events` | [FineTuneEvent](../../models/shared/finetuneevent.md)[] | :heavy_minus_sign: | N/A | -| `fineTunedModel` | *string* | :heavy_check_mark: | N/A | -| `hyperparams` | [FineTuneHyperparams](../../models/shared/finetunehyperparams.md) | :heavy_check_mark: | N/A | -| `id` | *string* | :heavy_check_mark: | N/A | -| `model` | *string* | :heavy_check_mark: | N/A | -| `object` | *string* | :heavy_check_mark: | N/A | -| `organizationId` | *string* | :heavy_check_mark: | N/A | -| `resultFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | N/A | -| `status` | *string* | :heavy_check_mark: | N/A | -| `trainingFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | N/A | -| `updatedAt` | *number* | :heavy_check_mark: | N/A | -| `validationFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| 
----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `createdAt` | *number* | :heavy_check_mark: | The unix timestamp for when the fine-tuning job was created. | +| `events` | [FineTuneEvent](../../models/shared/finetuneevent.md)[] | :heavy_minus_sign: | The list of events that have been observed in the lifecycle of the FineTune job. | +| `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. | +| `hyperparams` | [FineTuneHyperparams](../../models/shared/finetunehyperparams.md) | :heavy_check_mark: | The hyperparameters used for the fine-tuning job. See the [Fine-tuning Guide](/docs/guides/fine-tuning/hyperparameters) for more details. | +| `id` | *string* | :heavy_check_mark: | The object identifier, which can be referenced in the API endpoints. | +| `model` | *string* | :heavy_check_mark: | The base model that is being fine-tuned. | +| `object` | *string* | :heavy_check_mark: | The object type, which is always "fine-tune". | +| `organizationId` | *string* | :heavy_check_mark: | The organization that owns the fine-tuning job. | +| `resultFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The compiled results files for the fine-tuning job. | +| `status` | *string* | :heavy_check_mark: | The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. 
| +| `trainingFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The list of files used for training. | +| `updatedAt` | *number* | :heavy_check_mark: | The unix timestamp for when the fine-tuning job was last updated. | +| `validationFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The list of files used for validation. | \ No newline at end of file diff --git a/docs/models/shared/finetunehyperparams.md b/docs/models/shared/finetunehyperparams.md index c3c6f67..f06d277 100755 --- a/docs/models/shared/finetunehyperparams.md +++ b/docs/models/shared/finetunehyperparams.md @@ -1,14 +1,16 @@ # FineTuneHyperparams +The hyperparameters used for the fine-tuning job. See the [Fine-tuning Guide](/docs/guides/fine-tuning/hyperparameters) for more details. + ## Fields -| Field | Type | Required | Description | -| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | -| `batchSize` | *number* | :heavy_check_mark: | N/A | -| `classificationNClasses` | *number* | :heavy_minus_sign: | N/A | -| `classificationPositiveClass` | *string* | :heavy_minus_sign: | N/A | -| `computeClassificationMetrics` | *boolean* | :heavy_minus_sign: | N/A | -| `learningRateMultiplier` | *number* | :heavy_check_mark: | N/A | -| `nEpochs` | *number* | :heavy_check_mark: | N/A | -| `promptLossWeight` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------- | +| `batchSize` | *number* | :heavy_check_mark: | The batch size to use for training. The batch size is the number of
training examples used to train a single forward and backward pass.
| +| `classificationNClasses` | *number* | :heavy_minus_sign: | The number of classes to use for computing classification metrics.
| +| `classificationPositiveClass` | *string* | :heavy_minus_sign: | The positive class to use for computing classification metrics.
| +| `computeClassificationMetrics` | *boolean* | :heavy_minus_sign: | The classification metrics to compute using the validation dataset at the end of every epoch.
| +| `learningRateMultiplier` | *number* | :heavy_check_mark: | The learning rate multiplier to use for training.
| +| `nEpochs` | *number* | :heavy_check_mark: | The number of epochs to train the model for. An epoch refers to one
full cycle through the training dataset.
| +| `promptLossWeight` | *number* | :heavy_check_mark: | The weight to use for loss on the prompt tokens.
| \ No newline at end of file diff --git a/docs/models/shared/image.md b/docs/models/shared/image.md new file mode 100755 index 0000000..598d970 --- /dev/null +++ b/docs/models/shared/image.md @@ -0,0 +1,11 @@ +# Image + +Represents the url or the content of an image generated by the OpenAI API. + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| `b64Json` | *string* | :heavy_minus_sign: | The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. | +| `url` | *string* | :heavy_minus_sign: | The URL of the generated image, if `response_format` is `url` (default). | \ No newline at end of file diff --git a/docs/models/shared/imagesresponse.md b/docs/models/shared/imagesresponse.md index 49610b7..ed03dba 100755 --- a/docs/models/shared/imagesresponse.md +++ b/docs/models/shared/imagesresponse.md @@ -5,7 +5,7 @@ OK ## Fields -| Field | Type | Required | Description | -| ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | -| `created` | *number* | :heavy_check_mark: | N/A | -| `data` | [ImagesResponseData](../../models/shared/imagesresponsedata.md)[] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- | +| `created` | *number* | :heavy_check_mark: | N/A | +| `data` 
| [Image](../../models/shared/image.md)[] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/imagesresponsedata.md b/docs/models/shared/imagesresponsedata.md deleted file mode 100755 index 70f28eb..0000000 --- a/docs/models/shared/imagesresponsedata.md +++ /dev/null @@ -1,9 +0,0 @@ -# ImagesResponseData - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `b64Json` | *string* | :heavy_minus_sign: | N/A | -| `url` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/model.md b/docs/models/shared/model.md index 2812a9b..21c0bd5 100755 --- a/docs/models/shared/model.md +++ b/docs/models/shared/model.md @@ -1,13 +1,13 @@ # Model -OK +Describes an OpenAI model offering that can be used with the API. ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `created` | *number* | :heavy_check_mark: | N/A | -| `id` | *string* | :heavy_check_mark: | N/A | -| `object` | *string* | :heavy_check_mark: | N/A | -| `ownedBy` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `created` | *number* | :heavy_check_mark: | The date and time when the model was created. | +| `id` | *string* | :heavy_check_mark: | The model identifier, which can be referenced in the API endpoints. | +| `object` | *string* | :heavy_check_mark: | The object type, which is always "model". | +| `ownedBy` | *string* | :heavy_check_mark: | The organization that owns the model. 
| \ No newline at end of file diff --git a/docs/models/shared/openaifile.md b/docs/models/shared/openaifile.md index 4cb9697..e08e546 100755 --- a/docs/models/shared/openaifile.md +++ b/docs/models/shared/openaifile.md @@ -1,17 +1,18 @@ # OpenAIFile -OK +The `File` object represents a document that has been uploaded to OpenAI. + ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `bytes` | *number* | :heavy_check_mark: | N/A | -| `createdAt` | *number* | :heavy_check_mark: | N/A | -| `filename` | *string* | :heavy_check_mark: | N/A | -| `id` | *string* | :heavy_check_mark: | N/A | -| `object` | *string* | :heavy_check_mark: | N/A | -| `purpose` | *string* | :heavy_check_mark: | N/A | -| `status` | *string* | :heavy_minus_sign: | N/A | -| `statusDetails` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| `bytes` | *number* | :heavy_check_mark: | The size of the file in bytes. | +| `createdAt` | *number* | :heavy_check_mark: | The unix timestamp for when the file was created. | +| `filename` | *string* | :heavy_check_mark: | The name of the file. | +| `id` | *string* | :heavy_check_mark: | The file identifier, which can be referenced in the API endpoints. | +| `object` | *string* | :heavy_check_mark: | The object type, which is always "file". 
| +| `purpose` | *string* | :heavy_check_mark: | The intended purpose of the file. Currently, only "fine-tune" is supported. | +| `status` | *string* | :heavy_minus_sign: | The current status of the file, which can be either `uploaded`, `processed`, `pending`, `error`, `deleting` or `deleted`. | +| `statusDetails` | *string* | :heavy_minus_sign: | Additional details about the status of the file. If the file is in the `error` state, this will include a message describing the error.
| \ No newline at end of file diff --git a/files.gen b/files.gen index 722e2bc..0f3953e 100755 --- a/files.gen +++ b/files.gen @@ -47,6 +47,7 @@ src/sdk/models/shared/finetune.ts src/sdk/models/shared/openaifile.ts src/sdk/models/shared/finetuneevent.ts src/sdk/models/shared/createchatcompletionresponse.ts +src/sdk/models/shared/completionusage.ts src/sdk/models/shared/chatcompletionresponsemessage.ts src/sdk/models/shared/createchatcompletionrequest.ts src/sdk/models/shared/chatcompletionrequestmessage.ts @@ -56,10 +57,12 @@ src/sdk/models/shared/createcompletionrequest.ts src/sdk/models/shared/createeditresponse.ts src/sdk/models/shared/createeditrequest.ts src/sdk/models/shared/createembeddingresponse.ts +src/sdk/models/shared/embedding.ts src/sdk/models/shared/createembeddingrequest.ts src/sdk/models/shared/createfilerequest.ts src/sdk/models/shared/createfinetunerequest.ts src/sdk/models/shared/imagesresponse.ts +src/sdk/models/shared/image.ts src/sdk/models/shared/createimagerequest.ts src/sdk/models/shared/createimageeditrequest2.ts src/sdk/models/shared/createimagevariationrequest2.ts @@ -118,8 +121,8 @@ docs/models/shared/openaifile.md docs/models/shared/finetuneevent.md docs/models/shared/createchatcompletionresponsechoicesfinishreason.md docs/models/shared/createchatcompletionresponsechoices.md -docs/models/shared/createchatcompletionresponseusage.md docs/models/shared/createchatcompletionresponse.md +docs/models/shared/completionusage.md docs/models/shared/chatcompletionresponsemessagefunctioncall.md docs/models/shared/chatcompletionresponsemessagerole.md docs/models/shared/chatcompletionresponsemessage.md @@ -134,27 +137,25 @@ docs/models/shared/chatcompletionfunctions.md docs/models/shared/createcompletionresponsechoicesfinishreason.md docs/models/shared/createcompletionresponsechoiceslogprobs.md docs/models/shared/createcompletionresponsechoices.md -docs/models/shared/createcompletionresponseusage.md docs/models/shared/createcompletionresponse.md 
docs/models/shared/createcompletionrequestmodel2.md docs/models/shared/createcompletionrequest.md docs/models/shared/createeditresponsechoicesfinishreason.md docs/models/shared/createeditresponsechoices.md -docs/models/shared/createeditresponseusage.md docs/models/shared/createeditresponse.md docs/models/shared/createeditrequestmodel2.md docs/models/shared/createeditrequest.md -docs/models/shared/createembeddingresponsedata.md docs/models/shared/createembeddingresponseusage.md docs/models/shared/createembeddingresponse.md +docs/models/shared/embedding.md docs/models/shared/createembeddingrequestmodel2.md docs/models/shared/createembeddingrequest.md docs/models/shared/createfilerequestfile.md docs/models/shared/createfilerequest.md docs/models/shared/createfinetunerequestmodel2.md docs/models/shared/createfinetunerequest.md -docs/models/shared/imagesresponsedata.md docs/models/shared/imagesresponse.md +docs/models/shared/image.md docs/models/shared/createimagerequestresponseformat.md docs/models/shared/createimagerequestsize.md docs/models/shared/createimagerequest.md diff --git a/gen.yaml b/gen.yaml index f966c88..1c1bb30 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,6 +1,6 @@ configVersion: 1.0.0 management: - docChecksum: 679eee9640a500ecd4a9a1b0e8a56da3 + docChecksum: c73282185ebaac1c90cd78ffee98b5f9 docVersion: 2.0.0 speakeasyVersion: 1.71.0 generationVersion: 2.83.3 @@ -15,7 +15,7 @@ features: deprecations: 2.81.1 globalServerURLs: 2.81.1 typescript: - version: 2.17.1 + version: 2.17.2 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 4b51ad2..2bb1011 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.17.1", + "version": "2.17.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.17.1", + "version": "2.17.2", "dependencies": { "axios": "^1.1.3", 
"class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 588f11c..d16b81c 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.17.1", + "version": "2.17.2", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/chatcompletionresponsemessage.ts b/src/sdk/models/shared/chatcompletionresponsemessage.ts index ae5ed14..1a7493c 100755 --- a/src/sdk/models/shared/chatcompletionresponsemessage.ts +++ b/src/sdk/models/shared/chatcompletionresponsemessage.ts @@ -34,6 +34,9 @@ export enum ChatCompletionResponseMessageRole { Function = "function", } +/** + * A chat completion message generated by the model. + */ export class ChatCompletionResponseMessage extends SpeakeasyBase { /** * The contents of the message. diff --git a/src/sdk/models/shared/completionusage.ts b/src/sdk/models/shared/completionusage.ts new file mode 100755 index 0000000..4bed6d8 --- /dev/null +++ b/src/sdk/models/shared/completionusage.ts @@ -0,0 +1,32 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose } from "class-transformer"; + +/** + * Usage statistics for the completion request. + */ +export class CompletionUsage extends SpeakeasyBase { + /** + * Number of tokens in the generated completion. + */ + @SpeakeasyMetadata() + @Expose({ name: "completion_tokens" }) + completionTokens: number; + + /** + * Number of tokens in the prompt. + */ + @SpeakeasyMetadata() + @Expose({ name: "prompt_tokens" }) + promptTokens: number; + + /** + * Total number of tokens used in the request (prompt + completion). 
+ */ + @SpeakeasyMetadata() + @Expose({ name: "total_tokens" }) + totalTokens: number; +} diff --git a/src/sdk/models/shared/createchatcompletionresponse.ts b/src/sdk/models/shared/createchatcompletionresponse.ts index b57096f..74e6155 100755 --- a/src/sdk/models/shared/createchatcompletionresponse.ts +++ b/src/sdk/models/shared/createchatcompletionresponse.ts @@ -4,8 +4,16 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { ChatCompletionResponseMessage } from "./chatcompletionresponsemessage"; +import { CompletionUsage } from "./completionusage"; import { Expose, Type } from "class-transformer"; +/** + * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + * + * @remarks + * `length` if the maximum number of tokens specified in the request was reached, or `function_call` if the model called a function. + * + */ export enum CreateChatCompletionResponseChoicesFinishReason { Stop = "stop", Length = "length", @@ -13,61 +21,78 @@ export enum CreateChatCompletionResponseChoicesFinishReason { } export class CreateChatCompletionResponseChoices extends SpeakeasyBase { + /** + * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + * + * @remarks + * `length` if the maximum number of tokens specified in the request was reached, or `function_call` if the model called a function. + * + */ @SpeakeasyMetadata() @Expose({ name: "finish_reason" }) finishReason: CreateChatCompletionResponseChoicesFinishReason; + /** + * The index of the choice in the list of choices. + */ @SpeakeasyMetadata() @Expose({ name: "index" }) index: number; + /** + * A chat completion message generated by the model. 
+ */ @SpeakeasyMetadata() @Expose({ name: "message" }) @Type(() => ChatCompletionResponseMessage) message: ChatCompletionResponseMessage; } -export class CreateChatCompletionResponseUsage extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "completion_tokens" }) - completionTokens: number; - - @SpeakeasyMetadata() - @Expose({ name: "prompt_tokens" }) - promptTokens: number; - - @SpeakeasyMetadata() - @Expose({ name: "total_tokens" }) - totalTokens: number; -} - /** - * OK + * Represents a chat completion response returned by model, based on the provided input. */ export class CreateChatCompletionResponse extends SpeakeasyBase { + /** + * A list of chat completion choices. Can be more than one if `n` is greater than 1. + */ @SpeakeasyMetadata({ elemType: CreateChatCompletionResponseChoices }) @Expose({ name: "choices" }) @Type(() => CreateChatCompletionResponseChoices) choices: CreateChatCompletionResponseChoices[]; + /** + * A unix timestamp of when the chat completion was created. + */ @SpeakeasyMetadata() @Expose({ name: "created" }) created: number; + /** + * A unique identifier for the chat completion. + */ @SpeakeasyMetadata() @Expose({ name: "id" }) id: string; + /** + * The model used for the chat completion. + */ @SpeakeasyMetadata() @Expose({ name: "model" }) model: string; + /** + * The object type, which is always `chat.completion`. + */ @SpeakeasyMetadata() @Expose({ name: "object" }) object: string; + /** + * Usage statistics for the completion request. 
+ */ @SpeakeasyMetadata() @Expose({ name: "usage" }) - @Type(() => CreateChatCompletionResponseUsage) - usage?: CreateChatCompletionResponseUsage; + @Type(() => CompletionUsage) + usage?: CompletionUsage; } diff --git a/src/sdk/models/shared/createcompletionresponse.ts b/src/sdk/models/shared/createcompletionresponse.ts index 0894461..1e9bf88 100755 --- a/src/sdk/models/shared/createcompletionresponse.ts +++ b/src/sdk/models/shared/createcompletionresponse.ts @@ -3,8 +3,16 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { CompletionUsage } from "./completionusage"; import { Expose, Type } from "class-transformer"; +/** + * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + * + * @remarks + * or `length` if the maximum number of tokens specified in the request was reached. + * + */ export enum CreateCompletionResponseChoicesFinishReason { Stop = "stop", Length = "length", @@ -29,6 +37,13 @@ export class CreateCompletionResponseChoicesLogprobs extends SpeakeasyBase { } export class CreateCompletionResponseChoices extends SpeakeasyBase { + /** + * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + * + * @remarks + * or `length` if the maximum number of tokens specified in the request was reached. 
+ * + */ @SpeakeasyMetadata() @Expose({ name: "finish_reason" }) finishReason: CreateCompletionResponseChoicesFinishReason; @@ -47,47 +62,54 @@ export class CreateCompletionResponseChoices extends SpeakeasyBase { text: string; } -export class CreateCompletionResponseUsage extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "completion_tokens" }) - completionTokens: number; - - @SpeakeasyMetadata() - @Expose({ name: "prompt_tokens" }) - promptTokens: number; - - @SpeakeasyMetadata() - @Expose({ name: "total_tokens" }) - totalTokens: number; -} - /** - * OK + * Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + * + * @remarks + * */ export class CreateCompletionResponse extends SpeakeasyBase { + /** + * The list of completion choices the model generated for the input prompt. + */ @SpeakeasyMetadata({ elemType: CreateCompletionResponseChoices }) @Expose({ name: "choices" }) @Type(() => CreateCompletionResponseChoices) choices: CreateCompletionResponseChoices[]; + /** + * The Unix timestamp of when the completion was created. + */ @SpeakeasyMetadata() @Expose({ name: "created" }) created: number; + /** + * A unique identifier for the completion. + */ @SpeakeasyMetadata() @Expose({ name: "id" }) id: string; + /** + * The model used for completion. + */ @SpeakeasyMetadata() @Expose({ name: "model" }) model: string; + /** + * The object type, which is always "text_completion" + */ @SpeakeasyMetadata() @Expose({ name: "object" }) object: string; + /** + * Usage statistics for the completion request. 
+ */ @SpeakeasyMetadata() @Expose({ name: "usage" }) - @Type(() => CreateCompletionResponseUsage) - usage?: CreateCompletionResponseUsage; + @Type(() => CompletionUsage) + usage?: CompletionUsage; } diff --git a/src/sdk/models/shared/createeditresponse.ts b/src/sdk/models/shared/createeditresponse.ts index 936d3e7..5e9ee99 100755 --- a/src/sdk/models/shared/createeditresponse.ts +++ b/src/sdk/models/shared/createeditresponse.ts @@ -3,60 +3,79 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { CompletionUsage } from "./completionusage"; import { Expose, Type } from "class-transformer"; +/** + * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + * + * @remarks + * or `length` if the maximum number of tokens specified in the request was reached. + * + */ export enum CreateEditResponseChoicesFinishReason { Stop = "stop", Length = "length", } export class CreateEditResponseChoices extends SpeakeasyBase { + /** + * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + * + * @remarks + * or `length` if the maximum number of tokens specified in the request was reached. + * + */ @SpeakeasyMetadata() @Expose({ name: "finish_reason" }) finishReason: CreateEditResponseChoicesFinishReason; + /** + * The index of the choice in the list of choices. + */ @SpeakeasyMetadata() @Expose({ name: "index" }) index: number; + /** + * The edited result. 
+ */ @SpeakeasyMetadata() @Expose({ name: "text" }) text: string; } -export class CreateEditResponseUsage extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "completion_tokens" }) - completionTokens: number; - - @SpeakeasyMetadata() - @Expose({ name: "prompt_tokens" }) - promptTokens: number; - - @SpeakeasyMetadata() - @Expose({ name: "total_tokens" }) - totalTokens: number; -} - /** * OK */ export class CreateEditResponse extends SpeakeasyBase { + /** + * A list of edit choices. Can be more than one if `n` is greater than 1. + */ @SpeakeasyMetadata({ elemType: CreateEditResponseChoices }) @Expose({ name: "choices" }) @Type(() => CreateEditResponseChoices) choices: CreateEditResponseChoices[]; + /** + * A unix timestamp of when the edit was created. + */ @SpeakeasyMetadata() @Expose({ name: "created" }) created: number; + /** + * The object type, which is always `edit`. + */ @SpeakeasyMetadata() @Expose({ name: "object" }) object: string; + /** + * Usage statistics for the completion request. + */ @SpeakeasyMetadata() @Expose({ name: "usage" }) - @Type(() => CreateEditResponseUsage) - usage: CreateEditResponseUsage; + @Type(() => CompletionUsage) + usage: CompletionUsage; } diff --git a/src/sdk/models/shared/createembeddingresponse.ts b/src/sdk/models/shared/createembeddingresponse.ts index 1a468a5..3cea575 100755 --- a/src/sdk/models/shared/createembeddingresponse.ts +++ b/src/sdk/models/shared/createembeddingresponse.ts @@ -3,27 +3,23 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Embedding } from "./embedding"; import { Expose, Type } from "class-transformer"; -export class CreateEmbeddingResponseData extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "embedding" }) - embedding: number[]; - - @SpeakeasyMetadata() - @Expose({ name: "index" }) - index: number; - - @SpeakeasyMetadata() - @Expose({ name: "object" }) - object: string; -} - +/** + * The usage information for the request. 
+ */ export class CreateEmbeddingResponseUsage extends SpeakeasyBase { + /** + * The number of tokens used by the prompt. + */ @SpeakeasyMetadata() @Expose({ name: "prompt_tokens" }) promptTokens: number; + /** + * The total number of tokens used by the request. + */ @SpeakeasyMetadata() @Expose({ name: "total_tokens" }) totalTokens: number; @@ -33,19 +29,31 @@ export class CreateEmbeddingResponseUsage extends SpeakeasyBase { * OK */ export class CreateEmbeddingResponse extends SpeakeasyBase { - @SpeakeasyMetadata({ elemType: CreateEmbeddingResponseData }) + /** + * The list of embeddings generated by the model. + */ + @SpeakeasyMetadata({ elemType: Embedding }) @Expose({ name: "data" }) - @Type(() => CreateEmbeddingResponseData) - data: CreateEmbeddingResponseData[]; + @Type(() => Embedding) + data: Embedding[]; + /** + * The name of the model used to generate the embedding. + */ @SpeakeasyMetadata() @Expose({ name: "model" }) model: string; + /** + * The object type, which is always "embedding". + */ @SpeakeasyMetadata() @Expose({ name: "object" }) object: string; + /** + * The usage information for the request. + */ @SpeakeasyMetadata() @Expose({ name: "usage" }) @Type(() => CreateEmbeddingResponseUsage) diff --git a/src/sdk/models/shared/createmoderationresponse.ts b/src/sdk/models/shared/createmoderationresponse.ts index d50b16b..3a5384a 100755 --- a/src/sdk/models/shared/createmoderationresponse.ts +++ b/src/sdk/models/shared/createmoderationresponse.ts @@ -5,94 +5,160 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; +/** + * A list of the categories, and whether they are flagged or not. + */ export class CreateModerationResponseResultsCategories extends SpeakeasyBase { + /** + * Whether the content was flagged as 'hate'. + */ @SpeakeasyMetadata() @Expose({ name: "hate" }) hate: boolean; + /** + * Whether the content was flagged as 'hate/threatening'. 
+ */ @SpeakeasyMetadata() @Expose({ name: "hate/threatening" }) hateThreatening: boolean; + /** + * Whether the content was flagged as 'self-harm'. + */ @SpeakeasyMetadata() @Expose({ name: "self-harm" }) selfHarm: boolean; + /** + * Whether the content was flagged as 'sexual'. + */ @SpeakeasyMetadata() @Expose({ name: "sexual" }) sexual: boolean; + /** + * Whether the content was flagged as 'sexual/minors'. + */ @SpeakeasyMetadata() @Expose({ name: "sexual/minors" }) sexualMinors: boolean; + /** + * Whether the content was flagged as 'violence'. + */ @SpeakeasyMetadata() @Expose({ name: "violence" }) violence: boolean; + /** + * Whether the content was flagged as 'violence/graphic'. + */ @SpeakeasyMetadata() @Expose({ name: "violence/graphic" }) violenceGraphic: boolean; } +/** + * A list of the categories along with their scores as predicted by model. + */ export class CreateModerationResponseResultsCategoryScores extends SpeakeasyBase { + /** + * The score for the category 'hate'. + */ @SpeakeasyMetadata() @Expose({ name: "hate" }) hate: number; + /** + * The score for the category 'hate/threatening'. + */ @SpeakeasyMetadata() @Expose({ name: "hate/threatening" }) hateThreatening: number; + /** + * The score for the category 'self-harm'. + */ @SpeakeasyMetadata() @Expose({ name: "self-harm" }) selfHarm: number; + /** + * The score for the category 'sexual'. + */ @SpeakeasyMetadata() @Expose({ name: "sexual" }) sexual: number; + /** + * The score for the category 'sexual/minors'. + */ @SpeakeasyMetadata() @Expose({ name: "sexual/minors" }) sexualMinors: number; + /** + * The score for the category 'violence'. + */ @SpeakeasyMetadata() @Expose({ name: "violence" }) violence: number; + /** + * The score for the category 'violence/graphic'. 
+ */ @SpeakeasyMetadata() @Expose({ name: "violence/graphic" }) violenceGraphic: number; } export class CreateModerationResponseResults extends SpeakeasyBase { + /** + * A list of the categories, and whether they are flagged or not. + */ @SpeakeasyMetadata() @Expose({ name: "categories" }) @Type(() => CreateModerationResponseResultsCategories) categories: CreateModerationResponseResultsCategories; + /** + * A list of the categories along with their scores as predicted by model. + */ @SpeakeasyMetadata() @Expose({ name: "category_scores" }) @Type(() => CreateModerationResponseResultsCategoryScores) categoryScores: CreateModerationResponseResultsCategoryScores; + /** + * Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + */ @SpeakeasyMetadata() @Expose({ name: "flagged" }) flagged: boolean; } /** - * OK + * Represents policy compliance report by OpenAI's content moderation model against a given input. */ export class CreateModerationResponse extends SpeakeasyBase { + /** + * The unique identifier for the moderation request. + */ @SpeakeasyMetadata() @Expose({ name: "id" }) id: string; + /** + * The model used to generate the moderation results. + */ @SpeakeasyMetadata() @Expose({ name: "model" }) model: string; + /** + * A list of moderation objects. + */ @SpeakeasyMetadata({ elemType: CreateModerationResponseResults }) @Expose({ name: "results" }) @Type(() => CreateModerationResponseResults) diff --git a/src/sdk/models/shared/createtranscriptionrequest1.ts b/src/sdk/models/shared/createtranscriptionrequest1.ts index bc90b8f..8d6b1a1 100755 --- a/src/sdk/models/shared/createtranscriptionrequest1.ts +++ b/src/sdk/models/shared/createtranscriptionrequest1.ts @@ -38,7 +38,7 @@ export enum CreateTranscriptionRequestResponseFormat { export class CreateTranscriptionRequest1 extends SpeakeasyBase { /** - * The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. 
+ * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. * * @remarks * diff --git a/src/sdk/models/shared/createtranslationrequest.ts b/src/sdk/models/shared/createtranslationrequest.ts index f085a94..f2efcb8 100755 --- a/src/sdk/models/shared/createtranslationrequest.ts +++ b/src/sdk/models/shared/createtranslationrequest.ts @@ -24,7 +24,7 @@ export enum CreateTranslationRequestModel2 { export class CreateTranslationRequest extends SpeakeasyBase { /** - * The audio file object (not file name) translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm. + * The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. * * @remarks * diff --git a/src/sdk/models/shared/embedding.ts b/src/sdk/models/shared/embedding.ts new file mode 100755 index 0000000..18a5056 --- /dev/null +++ b/src/sdk/models/shared/embedding.ts @@ -0,0 +1,38 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose } from "class-transformer"; + +/** + * Represents an embedding vector returned by embedding endpoint. + * + * @remarks + * + */ +export class Embedding extends SpeakeasyBase { + /** + * The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). + * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "embedding" }) + embedding: number[]; + + /** + * The index of the embedding in the list of embeddings. + */ + @SpeakeasyMetadata() + @Expose({ name: "index" }) + index: number; + + /** + * The object type, which is always "embedding". 
+ */ + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; +} diff --git a/src/sdk/models/shared/finetune.ts b/src/sdk/models/shared/finetune.ts index 53a3b5c..08b6bc2 100755 --- a/src/sdk/models/shared/finetune.ts +++ b/src/sdk/models/shared/finetune.ts @@ -7,92 +7,181 @@ import { FineTuneEvent } from "./finetuneevent"; import { OpenAIFile } from "./openaifile"; import { Expose, Type } from "class-transformer"; +/** + * The hyperparameters used for the fine-tuning job. See the [Fine-tuning Guide](/docs/guides/fine-tuning/hyperparameters) for more details. + */ export class FineTuneHyperparams extends SpeakeasyBase { + /** + * The batch size to use for training. The batch size is the number of + * + * @remarks + * training examples used to train a single forward and backward pass. + * + */ @SpeakeasyMetadata() @Expose({ name: "batch_size" }) batchSize: number; + /** + * The number of classes to use for computing classification metrics. + * + * @remarks + * + */ @SpeakeasyMetadata() @Expose({ name: "classification_n_classes" }) classificationNClasses?: number; + /** + * The positive class to use for computing classification metrics. + * + * @remarks + * + */ @SpeakeasyMetadata() @Expose({ name: "classification_positive_class" }) classificationPositiveClass?: string; + /** + * The classification metrics to compute using the validation dataset at the end of every epoch. + * + * @remarks + * + */ @SpeakeasyMetadata() @Expose({ name: "compute_classification_metrics" }) computeClassificationMetrics?: boolean; + /** + * The learning rate multiplier to use for training. + * + * @remarks + * + */ @SpeakeasyMetadata() @Expose({ name: "learning_rate_multiplier" }) learningRateMultiplier: number; + /** + * The number of epochs to train the model for. An epoch refers to one + * + * @remarks + * full cycle through the training dataset. 
+ * + */ @SpeakeasyMetadata() @Expose({ name: "n_epochs" }) nEpochs: number; + /** + * The weight to use for loss on the prompt tokens. + * + * @remarks + * + */ @SpeakeasyMetadata() @Expose({ name: "prompt_loss_weight" }) promptLossWeight: number; } /** - * OK + * The `FineTune` object represents a fine-tuning job that has been created through the API. + * + * @remarks + * */ export class FineTune extends SpeakeasyBase { + /** + * The unix timestamp for when the fine-tuning job was created. + */ @SpeakeasyMetadata() @Expose({ name: "created_at" }) createdAt: number; + /** + * The list of events that have been observed in the lifecycle of the FineTune job. + */ @SpeakeasyMetadata({ elemType: FineTuneEvent }) @Expose({ name: "events" }) @Type(() => FineTuneEvent) events?: FineTuneEvent[]; + /** + * The name of the fine-tuned model that is being created. + */ @SpeakeasyMetadata() @Expose({ name: "fine_tuned_model" }) fineTunedModel: string; + /** + * The hyperparameters used for the fine-tuning job. See the [Fine-tuning Guide](/docs/guides/fine-tuning/hyperparameters) for more details. + */ @SpeakeasyMetadata() @Expose({ name: "hyperparams" }) @Type(() => FineTuneHyperparams) hyperparams: FineTuneHyperparams; + /** + * The object identifier, which can be referenced in the API endpoints. + */ @SpeakeasyMetadata() @Expose({ name: "id" }) id: string; + /** + * The base model that is being fine-tuned. + */ @SpeakeasyMetadata() @Expose({ name: "model" }) model: string; + /** + * The object type, which is always "fine-tune". + */ @SpeakeasyMetadata() @Expose({ name: "object" }) object: string; + /** + * The organization that owns the fine-tuning job. + */ @SpeakeasyMetadata() @Expose({ name: "organization_id" }) organizationId: string; + /** + * The compiled results files for the fine-tuning job. 
+ */ @SpeakeasyMetadata({ elemType: OpenAIFile }) @Expose({ name: "result_files" }) @Type(() => OpenAIFile) resultFiles: OpenAIFile[]; + /** + * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. + */ @SpeakeasyMetadata() @Expose({ name: "status" }) status: string; + /** + * The list of files used for training. + */ @SpeakeasyMetadata({ elemType: OpenAIFile }) @Expose({ name: "training_files" }) @Type(() => OpenAIFile) trainingFiles: OpenAIFile[]; + /** + * The unix timestamp for when the fine-tuning job was last updated. + */ @SpeakeasyMetadata() @Expose({ name: "updated_at" }) updatedAt: number; + /** + * The list of files used for validation. + */ @SpeakeasyMetadata({ elemType: OpenAIFile }) @Expose({ name: "validation_files" }) @Type(() => OpenAIFile) diff --git a/src/sdk/models/shared/image.ts b/src/sdk/models/shared/image.ts new file mode 100755 index 0000000..318230a --- /dev/null +++ b/src/sdk/models/shared/image.ts @@ -0,0 +1,25 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose } from "class-transformer"; + +/** + * Represents the url or the content of an image generated by the OpenAI API. + */ +export class Image extends SpeakeasyBase { + /** + * The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + */ + @SpeakeasyMetadata() + @Expose({ name: "b64_json" }) + b64Json?: string; + + /** + * The URL of the generated image, if `response_format` is `url` (default). 
+ */ + @SpeakeasyMetadata() + @Expose({ name: "url" }) + url?: string; +} diff --git a/src/sdk/models/shared/imagesresponse.ts b/src/sdk/models/shared/imagesresponse.ts index de0eac2..352015e 100755 --- a/src/sdk/models/shared/imagesresponse.ts +++ b/src/sdk/models/shared/imagesresponse.ts @@ -3,18 +3,9 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Image } from "./image"; import { Expose, Type } from "class-transformer"; -export class ImagesResponseData extends SpeakeasyBase { - @SpeakeasyMetadata() - @Expose({ name: "b64_json" }) - b64Json?: string; - - @SpeakeasyMetadata() - @Expose({ name: "url" }) - url?: string; -} - /** * OK */ @@ -23,8 +14,8 @@ export class ImagesResponse extends SpeakeasyBase { @Expose({ name: "created" }) created: number; - @SpeakeasyMetadata({ elemType: ImagesResponseData }) + @SpeakeasyMetadata({ elemType: Image }) @Expose({ name: "data" }) - @Type(() => ImagesResponseData) - data: ImagesResponseData[]; + @Type(() => Image) + data: Image[]; } diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index b6be31b..02f2033 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -5,6 +5,7 @@ export * from "./chatcompletionfunctions"; export * from "./chatcompletionrequestmessage"; export * from "./chatcompletionresponsemessage"; +export * from "./completionusage"; export * from "./createchatcompletionrequest"; export * from "./createchatcompletionresponse"; export * from "./createcompletionrequest"; @@ -26,8 +27,10 @@ export * from "./createtranslationrequest"; export * from "./createtranslationresponse"; export * from "./deletefileresponse"; export * from "./deletemodelresponse"; +export * from "./embedding"; export * from "./finetune"; export * from "./finetuneevent"; +export * from "./image"; export * from "./imagesresponse"; export * from "./listfilesresponse"; export * from "./listfinetuneeventsresponse"; diff --git 
a/src/sdk/models/shared/model.ts b/src/sdk/models/shared/model.ts index 9b30683..7313588 100755 --- a/src/sdk/models/shared/model.ts +++ b/src/sdk/models/shared/model.ts @@ -6,21 +6,33 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; /** - * OK + * Describes an OpenAI model offering that can be used with the API. */ export class Model extends SpeakeasyBase { + /** + * The date and time when the model was created. + */ @SpeakeasyMetadata() @Expose({ name: "created" }) created: number; + /** + * The model identifier, which can be referenced in the API endpoints. + */ @SpeakeasyMetadata() @Expose({ name: "id" }) id: string; + /** + * The object type, which is always "model". + */ @SpeakeasyMetadata() @Expose({ name: "object" }) object: string; + /** + * The organization that owns the model. + */ @SpeakeasyMetadata() @Expose({ name: "owned_by" }) ownedBy: string; diff --git a/src/sdk/models/shared/openaifile.ts b/src/sdk/models/shared/openaifile.ts index 8d5d59b..423f53c 100755 --- a/src/sdk/models/shared/openaifile.ts +++ b/src/sdk/models/shared/openaifile.ts @@ -6,37 +6,67 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; /** - * OK + * The `File` object represents a document that has been uploaded to OpenAI. + * + * @remarks + * */ export class OpenAIFile extends SpeakeasyBase { + /** + * The size of the file in bytes. + */ @SpeakeasyMetadata() @Expose({ name: "bytes" }) bytes: number; + /** + * The unix timestamp for when the file was created. + */ @SpeakeasyMetadata() @Expose({ name: "created_at" }) createdAt: number; + /** + * The name of the file. + */ @SpeakeasyMetadata() @Expose({ name: "filename" }) filename: string; + /** + * The file identifier, which can be referenced in the API endpoints. + */ @SpeakeasyMetadata() @Expose({ name: "id" }) id: string; + /** + * The object type, which is always "file". 
+ */ @SpeakeasyMetadata() @Expose({ name: "object" }) object: string; + /** + * The intended purpose of the file. Currently, only "fine-tune" is supported. + */ @SpeakeasyMetadata() @Expose({ name: "purpose" }) purpose: string; + /** + * The current status of the file, which can be either `uploaded`, `processed`, `pending`, `error`, `deleting` or `deleted`. + */ @SpeakeasyMetadata() @Expose({ name: "status" }) status?: string; + /** + * Additional details about the status of the file. If the file is in the `error` state, this will include a message describing the error. + * + * @remarks + * + */ @SpeakeasyMetadata() @Expose({ name: "status_details" }) statusDetails?: string; diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 7d61854..86f713d 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,7 +38,7 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.17.1"; + sdkVersion = "2.17.2"; genVersion = "2.83.3"; public constructor(init?: Partial) { From b6de8406e27ab118d17b0d23b9bf79d21e60117c Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 15 Aug 2023 00:53:19 +0000 Subject: [PATCH 37/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.72.0 --- RELEASES.md | 12 +++++++++++- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 33d1fd5..c5ac2f3 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -440,4 +440,14 @@ Based on: ### Generated - [typescript v2.17.2] . ### Releases -- [NPM v2.17.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.17.2 - . \ No newline at end of file +- [NPM v2.17.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.17.2 - . 
+ +## 2023-08-15 00:52:54 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.72.0 (2.84.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.18.0] . +### Releases +- [NPM v2.18.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.0 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 1c1bb30..f225f0a 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: c73282185ebaac1c90cd78ffee98b5f9 docVersion: 2.0.0 - speakeasyVersion: 1.71.0 - generationVersion: 2.83.3 + speakeasyVersion: 1.72.0 + generationVersion: 2.84.1 generation: sdkClassName: gpt sdkFlattening: true @@ -13,9 +13,9 @@ features: typescript: core: 2.82.0 deprecations: 2.81.1 - globalServerURLs: 2.81.1 + globalServerURLs: 2.82.0 typescript: - version: 2.17.2 + version: 2.18.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 2bb1011..0c65b44 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.17.2", + "version": "2.18.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.17.2", + "version": "2.18.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index d16b81c..4a17ae3 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.17.2", + "version": "2.18.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 86f713d..c7e3e72 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.17.2"; - genVersion 
= "2.83.3"; + sdkVersion = "2.18.0"; + genVersion = "2.84.1"; public constructor(init?: Partial) { Object.assign(this, init); From 668a66299ba9212c7a8b8f6af46d8d6b74e90174 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 17 Aug 2023 00:52:10 +0000 Subject: [PATCH 38/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.73.1 --- RELEASES.md | 12 +++++++++++- ....md => chatcompletionfunctioncalloption.md} | 2 +- files.gen | 3 ++- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- .../shared/chatcompletionfunctioncalloption.ts | 18 ++++++++++++++++++ .../shared/createchatcompletionrequest.ts | 12 ------------ src/sdk/models/shared/index.ts | 1 + src/sdk/sdk.ts | 4 ++-- 10 files changed, 42 insertions(+), 24 deletions(-) rename docs/models/shared/{createchatcompletionrequestfunctioncall2.md => chatcompletionfunctioncalloption.md} (95%) create mode 100755 src/sdk/models/shared/chatcompletionfunctioncalloption.ts diff --git a/RELEASES.md b/RELEASES.md index c5ac2f3..ac0308e 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -450,4 +450,14 @@ Based on: ### Generated - [typescript v2.18.0] . ### Releases -- [NPM v2.18.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.0 - . \ No newline at end of file +- [NPM v2.18.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.0 - . + +## 2023-08-17 00:51:43 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.73.1 (2.84.3) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.18.1] . +### Releases +- [NPM v2.18.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.1 - . 
\ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequestfunctioncall2.md b/docs/models/shared/chatcompletionfunctioncalloption.md similarity index 95% rename from docs/models/shared/createchatcompletionrequestfunctioncall2.md rename to docs/models/shared/chatcompletionfunctioncalloption.md index 0ab17eb..5ce7048 100755 --- a/docs/models/shared/createchatcompletionrequestfunctioncall2.md +++ b/docs/models/shared/chatcompletionfunctioncalloption.md @@ -1,4 +1,4 @@ -# CreateChatCompletionRequestFunctionCall2 +# ChatCompletionFunctionCallOption Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. diff --git a/files.gen b/files.gen index 0f3953e..d6e41ea 100755 --- a/files.gen +++ b/files.gen @@ -52,6 +52,7 @@ src/sdk/models/shared/chatcompletionresponsemessage.ts src/sdk/models/shared/createchatcompletionrequest.ts src/sdk/models/shared/chatcompletionrequestmessage.ts src/sdk/models/shared/chatcompletionfunctions.ts +src/sdk/models/shared/chatcompletionfunctioncalloption.ts src/sdk/models/shared/createcompletionresponse.ts src/sdk/models/shared/createcompletionrequest.ts src/sdk/models/shared/createeditresponse.ts @@ -126,7 +127,6 @@ docs/models/shared/completionusage.md docs/models/shared/chatcompletionresponsemessagefunctioncall.md docs/models/shared/chatcompletionresponsemessagerole.md docs/models/shared/chatcompletionresponsemessage.md -docs/models/shared/createchatcompletionrequestfunctioncall2.md docs/models/shared/createchatcompletionrequestfunctioncall1.md docs/models/shared/createchatcompletionrequestmodel2.md docs/models/shared/createchatcompletionrequest.md @@ -134,6 +134,7 @@ 
docs/models/shared/chatcompletionrequestmessagefunctioncall.md docs/models/shared/chatcompletionrequestmessagerole.md docs/models/shared/chatcompletionrequestmessage.md docs/models/shared/chatcompletionfunctions.md +docs/models/shared/chatcompletionfunctioncalloption.md docs/models/shared/createcompletionresponsechoicesfinishreason.md docs/models/shared/createcompletionresponsechoiceslogprobs.md docs/models/shared/createcompletionresponsechoices.md diff --git a/gen.yaml b/gen.yaml index f225f0a..0938add 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: c73282185ebaac1c90cd78ffee98b5f9 + docChecksum: 4cc092de14513b7428912f74b026c0c7 docVersion: 2.0.0 - speakeasyVersion: 1.72.0 - generationVersion: 2.84.1 + speakeasyVersion: 1.73.1 + generationVersion: 2.84.3 generation: sdkClassName: gpt sdkFlattening: true @@ -15,7 +15,7 @@ features: deprecations: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.18.0 + version: 2.18.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 0c65b44..5f977c3 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.18.0", + "version": "2.18.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.18.0", + "version": "2.18.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 4a17ae3..e8424fb 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.18.0", + "version": "2.18.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/chatcompletionfunctioncalloption.ts b/src/sdk/models/shared/chatcompletionfunctioncalloption.ts new file mode 100755 index 0000000..2d533aa --- /dev/null +++ 
b/src/sdk/models/shared/chatcompletionfunctioncalloption.ts @@ -0,0 +1,18 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose } from "class-transformer"; + +/** + * Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + */ +export class ChatCompletionFunctionCallOption extends SpeakeasyBase { + /** + * The name of the function to call. + */ + @SpeakeasyMetadata() + @Expose({ name: "name" }) + name: string; +} diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts index 6d29ace..2f03a44 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -7,18 +7,6 @@ import { ChatCompletionFunctions } from "./chatcompletionfunctions"; import { ChatCompletionRequestMessage } from "./chatcompletionrequestmessage"; import { Expose, Type } from "class-transformer"; -/** - * Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. - */ -export class CreateChatCompletionRequestFunctionCall2 extends SpeakeasyBase { - /** - * The name of the function to call. 
- */ - @SpeakeasyMetadata() - @Expose({ name: "name" }) - name: string; -} - /** * Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. */ diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index 02f2033..c20e423 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -2,6 +2,7 @@ * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. */ +export * from "./chatcompletionfunctioncalloption"; export * from "./chatcompletionfunctions"; export * from "./chatcompletionrequestmessage"; export * from "./chatcompletionresponsemessage"; diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index c7e3e72..26191b8 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.18.0"; - genVersion = "2.84.1"; + sdkVersion = "2.18.1"; + genVersion = "2.84.3"; public constructor(init?: Partial) { Object.assign(this, init); From 01648dacbe68ee61748cbbcc99abd95e6d245dc9 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 19 Aug 2023 00:51:17 +0000 Subject: [PATCH 39/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.74.3 --- RELEASES.md | 12 +++- ...eatemoderationresponseresultscategories.md | 22 +++--- ...moderationresponseresultscategoryscores.md | 22 +++--- gen.yaml | 8 +-- package-lock.json | 4 +- package.json | 2 +- .../models/shared/createmoderationresponse.ts | 70 +++++++++++++++++-- src/sdk/sdk.ts | 4 +- 8 files changed, 109 insertions(+), 35 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index ac0308e..fd75e8b 100644 --- 
a/RELEASES.md +++ b/RELEASES.md @@ -460,4 +460,14 @@ Based on: ### Generated - [typescript v2.18.1] . ### Releases -- [NPM v2.18.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.1 - . \ No newline at end of file +- [NPM v2.18.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.1 - . + +## 2023-08-19 00:50:55 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.74.3 (2.86.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.18.2] . +### Releases +- [NPM v2.18.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.2 - . \ No newline at end of file diff --git a/docs/models/shared/createmoderationresponseresultscategories.md b/docs/models/shared/createmoderationresponseresultscategories.md index 73fa1d6..b653fe7 100755 --- a/docs/models/shared/createmoderationresponseresultscategories.md +++ b/docs/models/shared/createmoderationresponseresultscategories.md @@ -5,12 +5,16 @@ A list of the categories, and whether they are flagged or not. ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | -| `hate` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'hate'. | -| `hateThreatening` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'hate/threatening'. | -| `selfHarm` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'self-harm'. | -| `sexual` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'sexual'. | -| `sexualMinors` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'sexual/minors'. | -| `violence` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'violence'. 
| -| `violenceGraphic` | *boolean* | :heavy_check_mark: | Whether the content was flagged as 'violence/graphic'. | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `harassment` | *boolean* | :heavy_check_mark: | Content that expresses, incites, or promotes harassing language towards any target. | +| `harassmentThreatening` | *boolean* | :heavy_check_mark: | Harassment content that also includes violence or serious harm towards any target. | +| `hate` | *boolean* | :heavy_check_mark: | Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harrassment. | +| `hateThreatening` | *boolean* | :heavy_check_mark: | Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. 
| +| `selfHarm` | *boolean* | :heavy_check_mark: | Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. | +| `selfHarmInstructions` | *boolean* | :heavy_check_mark: | Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. | +| `selfHarmIntent` | *boolean* | :heavy_check_mark: | Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. | +| `sexual` | *boolean* | :heavy_check_mark: | Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). | +| `sexualMinors` | *boolean* | :heavy_check_mark: | Sexual content that includes an individual who is under 18 years old. | +| `violence` | *boolean* | :heavy_check_mark: | Content that depicts death, violence, or physical injury. | +| `violenceGraphic` | *boolean* | :heavy_check_mark: | Content that depicts death, violence, or physical injury in graphic detail. | \ No newline at end of file diff --git a/docs/models/shared/createmoderationresponseresultscategoryscores.md b/docs/models/shared/createmoderationresponseresultscategoryscores.md index 0681fd9..8eec126 100755 --- a/docs/models/shared/createmoderationresponseresultscategoryscores.md +++ b/docs/models/shared/createmoderationresponseresultscategoryscores.md @@ -5,12 +5,16 @@ A list of the categories along with their scores as predicted by model. ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `hate` | *number* | :heavy_check_mark: | The score for the category 'hate'. 
| -| `hateThreatening` | *number* | :heavy_check_mark: | The score for the category 'hate/threatening'. | -| `selfHarm` | *number* | :heavy_check_mark: | The score for the category 'self-harm'. | -| `sexual` | *number* | :heavy_check_mark: | The score for the category 'sexual'. | -| `sexualMinors` | *number* | :heavy_check_mark: | The score for the category 'sexual/minors'. | -| `violence` | *number* | :heavy_check_mark: | The score for the category 'violence'. | -| `violenceGraphic` | *number* | :heavy_check_mark: | The score for the category 'violence/graphic'. | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `harassment` | *number* | :heavy_check_mark: | The score for the category 'harassment'. | +| `harassmentThreatening` | *number* | :heavy_check_mark: | The score for the category 'harassment/threatening'. | +| `hate` | *number* | :heavy_check_mark: | The score for the category 'hate'. | +| `hateThreatening` | *number* | :heavy_check_mark: | The score for the category 'hate/threatening'. | +| `selfHarm` | *number* | :heavy_check_mark: | The score for the category 'self-harm'. | +| `selfHarmInstructions` | *number* | :heavy_check_mark: | The score for the category 'self-harm/instructions'. | +| `selfHarmIntent` | *number* | :heavy_check_mark: | The score for the category 'self-harm/intent'. | +| `sexual` | *number* | :heavy_check_mark: | The score for the category 'sexual'. | +| `sexualMinors` | *number* | :heavy_check_mark: | The score for the category 'sexual/minors'. | +| `violence` | *number* | :heavy_check_mark: | The score for the category 'violence'. | +| `violenceGraphic` | *number* | :heavy_check_mark: | The score for the category 'violence/graphic'. 
| \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 0938add..658f28b 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: 4cc092de14513b7428912f74b026c0c7 + docChecksum: 572d9e3dcdaa71e8e84657184b7c2c7a docVersion: 2.0.0 - speakeasyVersion: 1.73.1 - generationVersion: 2.84.3 + speakeasyVersion: 1.74.3 + generationVersion: 2.86.6 generation: sdkClassName: gpt sdkFlattening: true @@ -15,7 +15,7 @@ features: deprecations: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.18.1 + version: 2.18.2 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 5f977c3..8e5ed87 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.18.1", + "version": "2.18.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.18.1", + "version": "2.18.2", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index e8424fb..c4c21f2 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.18.1", + "version": "2.18.2", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/createmoderationresponse.ts b/src/sdk/models/shared/createmoderationresponse.ts index 3a5384a..a0d9c1d 100755 --- a/src/sdk/models/shared/createmoderationresponse.ts +++ b/src/sdk/models/shared/createmoderationresponse.ts @@ -10,49 +10,77 @@ import { Expose, Type } from "class-transformer"; */ export class CreateModerationResponseResultsCategories extends SpeakeasyBase { /** - * Whether the content was flagged as 'hate'. + * Content that expresses, incites, or promotes harassing language towards any target. 
+ */ + @SpeakeasyMetadata() + @Expose({ name: "harassment" }) + harassment: boolean; + + /** + * Harassment content that also includes violence or serious harm towards any target. + */ + @SpeakeasyMetadata() + @Expose({ name: "harassment/threatening" }) + harassmentThreatening: boolean; + + /** + * Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harrassment. */ @SpeakeasyMetadata() @Expose({ name: "hate" }) hate: boolean; /** - * Whether the content was flagged as 'hate/threatening'. + * Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. */ @SpeakeasyMetadata() @Expose({ name: "hate/threatening" }) hateThreatening: boolean; /** - * Whether the content was flagged as 'self-harm'. + * Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. */ @SpeakeasyMetadata() @Expose({ name: "self-harm" }) selfHarm: boolean; /** - * Whether the content was flagged as 'sexual'. + * Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + */ + @SpeakeasyMetadata() + @Expose({ name: "self-harm/instructions" }) + selfHarmInstructions: boolean; + + /** + * Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + */ + @SpeakeasyMetadata() + @Expose({ name: "self-harm/intent" }) + selfHarmIntent: boolean; + + /** + * Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). 
*/ @SpeakeasyMetadata() @Expose({ name: "sexual" }) sexual: boolean; /** - * Whether the content was flagged as 'sexual/minors'. + * Sexual content that includes an individual who is under 18 years old. */ @SpeakeasyMetadata() @Expose({ name: "sexual/minors" }) sexualMinors: boolean; /** - * Whether the content was flagged as 'violence'. + * Content that depicts death, violence, or physical injury. */ @SpeakeasyMetadata() @Expose({ name: "violence" }) violence: boolean; /** - * Whether the content was flagged as 'violence/graphic'. + * Content that depicts death, violence, or physical injury in graphic detail. */ @SpeakeasyMetadata() @Expose({ name: "violence/graphic" }) @@ -63,6 +91,20 @@ export class CreateModerationResponseResultsCategories extends SpeakeasyBase { * A list of the categories along with their scores as predicted by model. */ export class CreateModerationResponseResultsCategoryScores extends SpeakeasyBase { + /** + * The score for the category 'harassment'. + */ + @SpeakeasyMetadata() + @Expose({ name: "harassment" }) + harassment: number; + + /** + * The score for the category 'harassment/threatening'. + */ + @SpeakeasyMetadata() + @Expose({ name: "harassment/threatening" }) + harassmentThreatening: number; + /** * The score for the category 'hate'. */ @@ -84,6 +126,20 @@ export class CreateModerationResponseResultsCategoryScores extends SpeakeasyBase @Expose({ name: "self-harm" }) selfHarm: number; + /** + * The score for the category 'self-harm/instructions'. + */ + @SpeakeasyMetadata() + @Expose({ name: "self-harm/instructions" }) + selfHarmInstructions: number; + + /** + * The score for the category 'self-harm/intent'. + */ + @SpeakeasyMetadata() + @Expose({ name: "self-harm/intent" }) + selfHarmIntent: number; + /** * The score for the category 'sexual'. 
*/ diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 26191b8..555fef2 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.18.1"; - genVersion = "2.84.3"; + sdkVersion = "2.18.2"; + genVersion = "2.86.6"; public constructor(init?: Partial) { Object.assign(this, init); From 40c876aee3956021c9b33ca695e23bde1d8d5f24 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 25 Aug 2023 00:54:14 +0000 Subject: [PATCH 40/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.74.11 --- RELEASES.md | 12 +++++++++++- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index fd75e8b..27d50bc 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -470,4 +470,14 @@ Based on: ### Generated - [typescript v2.18.2] . ### Releases -- [NPM v2.18.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.2 - . \ No newline at end of file +- [NPM v2.18.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.2 - . + +## 2023-08-25 00:53:52 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.74.11 (2.87.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.18.3] . +### Releases +- [NPM v2.18.3] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.3 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 658f28b..034f63e 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: 572d9e3dcdaa71e8e84657184b7c2c7a docVersion: 2.0.0 - speakeasyVersion: 1.74.3 - generationVersion: 2.86.6 + speakeasyVersion: 1.74.11 + generationVersion: 2.87.1 generation: sdkClassName: gpt sdkFlattening: true @@ -11,11 +11,11 @@ generation: telemetryEnabled: false features: typescript: - core: 2.82.0 + core: 2.82.1 deprecations: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.18.2 + version: 2.18.3 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 8e5ed87..869e98f 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.18.2", + "version": "2.18.3", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.18.2", + "version": "2.18.3", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index c4c21f2..33113f4 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.18.2", + "version": "2.18.3", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 555fef2..556afcb 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.18.2"; - genVersion = "2.86.6"; + sdkVersion = "2.18.3"; + genVersion = "2.87.1"; public constructor(init?: Partial) { Object.assign(this, init); From edc94ceb8ed238e84a8a2610e161bf0499c1eec9 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 26 Aug 2023 00:52:06 +0000 Subject: [PATCH 41/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 
1.74.16 --- RELEASES.md | 12 +++++++++++- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 3 ++- src/sdk/sdk.ts | 4 ++-- 5 files changed, 21 insertions(+), 10 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 27d50bc..6be57b4 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -480,4 +480,14 @@ Based on: ### Generated - [typescript v2.18.3] . ### Releases -- [NPM v2.18.3] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.3 - . \ No newline at end of file +- [NPM v2.18.3] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.18.3 - . + +## 2023-08-26 00:51:39 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.74.16 (2.88.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.19.0] . +### Releases +- [NPM v2.19.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.19.0 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 034f63e..d8c9b4b 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: 572d9e3dcdaa71e8e84657184b7c2c7a docVersion: 2.0.0 - speakeasyVersion: 1.74.11 - generationVersion: 2.87.1 + speakeasyVersion: 1.74.16 + generationVersion: 2.88.2 generation: sdkClassName: gpt sdkFlattening: true @@ -11,11 +11,11 @@ generation: telemetryEnabled: false features: typescript: - core: 2.82.1 + core: 2.83.1 deprecations: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.18.3 + version: 2.19.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 869e98f..12bb586 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.18.3", + "version": "2.19.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.18.3", + "version": "2.19.0", "dependencies": { 
"axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 33113f4..985a568 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.18.3", + "version": "2.19.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" @@ -23,6 +23,7 @@ "main": "dist/index.js", "files": [ "dist", + "docs", "README.md" ], "repository": { diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 556afcb..e71753a 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.18.3"; - genVersion = "2.87.1"; + sdkVersion = "2.19.0"; + genVersion = "2.88.2"; public constructor(init?: Partial) { Object.assign(this, init); From b19936e3c52bfc2bd56c41bee9f1b3492fab0d41 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 31 Aug 2023 00:54:33 +0000 Subject: [PATCH 42/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.76.1 --- .gitattributes | 2 ++ RELEASES.md | 12 +++++++++++- files.gen | 3 ++- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 7 files changed, 24 insertions(+), 11 deletions(-) create mode 100755 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100755 index 0000000..113eead --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# This allows generated code to be indexed correctly +*.ts linguist-generated=false \ No newline at end of file diff --git a/RELEASES.md b/RELEASES.md index 6be57b4..fb2fa35 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -490,4 +490,14 @@ Based on: ### Generated - [typescript v2.19.0] . ### Releases -- [NPM v2.19.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.19.0 - . \ No newline at end of file +- [NPM v2.19.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.19.0 - . 
+ +## 2023-08-31 00:54:10 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.76.1 (2.89.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.20.0] . +### Releases +- [NPM v2.20.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.20.0 - . \ No newline at end of file diff --git a/files.gen b/files.gen index d6e41ea..552ece1 100755 --- a/files.gen +++ b/files.gen @@ -190,4 +190,5 @@ docs/models/shared/listfilesresponse.md docs/models/shared/listfinetuneeventsresponse.md docs/models/shared/listfinetunesresponse.md docs/models/shared/listmodelsresponse.md -docs/models/shared/model.md \ No newline at end of file +docs/models/shared/model.md +.gitattributes \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index d8c9b4b..e835952 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: 572d9e3dcdaa71e8e84657184b7c2c7a docVersion: 2.0.0 - speakeasyVersion: 1.74.16 - generationVersion: 2.88.2 + speakeasyVersion: 1.76.1 + generationVersion: 2.89.1 generation: sdkClassName: gpt sdkFlattening: true @@ -11,11 +11,11 @@ generation: telemetryEnabled: false features: typescript: - core: 2.83.1 + core: 2.84.1 deprecations: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.19.0 + version: 2.20.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 12bb586..759933e 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.19.0", + "version": "2.20.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.19.0", + "version": "2.20.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 985a568..0d876d4 100755 --- a/package.json +++ 
b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.19.0", + "version": "2.20.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index e71753a..bbe59ae 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.19.0"; - genVersion = "2.88.2"; + sdkVersion = "2.20.0"; + genVersion = "2.89.1"; public constructor(init?: Partial) { Object.assign(this, init); From 1c4b70a4c1a15a0764c3a34233f393cd326dd26f Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 1 Sep 2023 00:57:53 +0000 Subject: [PATCH 43/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.77.0 --- RELEASES.md | 12 +++++++++++- gen.yaml | 10 +++++----- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 5 files changed, 21 insertions(+), 11 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index fb2fa35..8f565d7 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -500,4 +500,14 @@ Based on: ### Generated - [typescript v2.20.0] . ### Releases -- [NPM v2.20.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.20.0 - . \ No newline at end of file +- [NPM v2.20.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.20.0 - . + +## 2023-09-01 00:57:30 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.77.0 (2.91.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.21.0] . +### Releases +- [NPM v2.21.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.21.0 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index e835952..8cd6ccc 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: 572d9e3dcdaa71e8e84657184b7c2c7a + docChecksum: 25bfe8c5a77a1c2cf6a1be5713abc57e docVersion: 2.0.0 - speakeasyVersion: 1.76.1 - generationVersion: 2.89.1 + speakeasyVersion: 1.77.0 + generationVersion: 2.91.2 generation: sdkClassName: gpt sdkFlattening: true @@ -11,11 +11,11 @@ generation: telemetryEnabled: false features: typescript: - core: 2.84.1 + core: 2.85.0 deprecations: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.20.0 + version: 2.21.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 759933e..e2b603d 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.20.0", + "version": "2.21.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.20.0", + "version": "2.21.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 0d876d4..0bc3080 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.20.0", + "version": "2.21.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index bbe59ae..64cd83a 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.20.0"; - genVersion = "2.89.1"; + sdkVersion = "2.21.0"; + genVersion = "2.91.2"; public constructor(init?: Partial) { Object.assign(this, init); From 8b46deaef5e6af5e90278b20b0acce256eecef34 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 2 Sep 2023 00:52:30 +0000 Subject: [PATCH 44/66] ci: 
regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.77.2 --- README.md | 34 +- RELEASES.md | 12 +- .../operations/cancelfinetuningjobrequest.md | 8 + .../operations/cancelfinetuningjobresponse.md | 11 + .../operations/createfinetuningjobresponse.md | 11 + docs/models/operations/deletemodelrequest.md | 6 +- .../operations/listfinetuningeventsrequest.md | 10 + .../listfinetuningeventsresponse.md | 11 + .../listpaginatedfinetuningjobsrequest.md | 9 + .../listpaginatedfinetuningjobsresponse.md | 11 + .../retrievefinetuningjobrequest.md | 8 + .../retrievefinetuningjobresponse.md | 11 + .../models/operations/retrievemodelrequest.md | 2 +- .../shared/createchatcompletionrequest.md | 4 +- .../shared/createchatcompletionresponse.md | 2 +- docs/models/shared/createcompletionrequest.md | 4 +- .../shared/createcompletionrequestmodel2.md | 2 + .../models/shared/createcompletionresponse.md | 2 +- docs/models/shared/createeditresponse.md | 6 +- docs/models/shared/createfilerequest.md | 8 +- docs/models/shared/createfinetunerequest.md | 28 +- .../shared/createfinetunerequestmodel2.md | 4 +- .../shared/createfinetuningjobrequest.md | 12 + ...eatefinetuningjobrequesthyperparameters.md | 10 + ...tuningjobrequesthyperparametersnepochs1.md | 12 + .../createfinetuningjobrequestmodel2.md | 14 + docs/models/shared/finetune.md | 36 +- docs/models/shared/finetunehyperparams.md | 2 +- docs/models/shared/finetuningjob.md | 23 + docs/models/shared/finetuningjobevent.md | 11 + docs/models/shared/finetuningjobeventlevel.md | 10 + .../shared/finetuningjobhyperparameters.md | 10 + .../finetuningjobhyperparametersnepochs1.md | 11 + .../shared/listfinetuningjobeventsresponse.md | 11 + .../listpaginatedfinetuningjobsresponse.md | 12 + docs/models/shared/model.md | 2 +- docs/models/shared/openaifile.md | 2 +- docs/sdks/openai/README.md | 299 +++++++++++-- files.gen | 30 ++ gen.yaml | 10 +- package-lock.json | 4 +- package.json | 2 +- .../models/operations/cancelfinetuningjob.ts | 35 ++ 
.../models/operations/createfinetuningjob.ts | 24 ++ src/sdk/models/operations/index.ts | 5 + .../models/operations/listfinetuningevents.ts | 47 +++ .../operations/listpaginatedfinetuningjobs.ts | 38 ++ .../operations/retrievefinetuningjob.ts | 35 ++ .../shared/createchatcompletionrequest.ts | 4 +- .../shared/createchatcompletionresponse.ts | 2 +- .../models/shared/createcompletionrequest.ts | 6 +- .../models/shared/createcompletionresponse.ts | 2 +- src/sdk/models/shared/createeditresponse.ts | 4 +- src/sdk/models/shared/createfilerequest.ts | 4 +- .../models/shared/createfinetunerequest.ts | 16 +- .../shared/createfinetuningjobrequest.ts | 114 +++++ src/sdk/models/shared/finetune.ts | 14 +- src/sdk/models/shared/finetuningjob.ts | 133 ++++++ src/sdk/models/shared/finetuningjobevent.ts | 30 ++ src/sdk/models/shared/index.ts | 5 + .../shared/listfinetuningjobeventsresponse.ts | 21 + .../listpaginatedfinetuningjobsresponse.ts | 25 ++ src/sdk/models/shared/model.ts | 2 +- src/sdk/models/shared/openaifile.ts | 2 +- src/sdk/openai.ts | 399 +++++++++++++++++- src/sdk/sdk.ts | 4 +- 66 files changed, 1559 insertions(+), 139 deletions(-) create mode 100755 docs/models/operations/cancelfinetuningjobrequest.md create mode 100755 docs/models/operations/cancelfinetuningjobresponse.md create mode 100755 docs/models/operations/createfinetuningjobresponse.md create mode 100755 docs/models/operations/listfinetuningeventsrequest.md create mode 100755 docs/models/operations/listfinetuningeventsresponse.md create mode 100755 docs/models/operations/listpaginatedfinetuningjobsrequest.md create mode 100755 docs/models/operations/listpaginatedfinetuningjobsresponse.md create mode 100755 docs/models/operations/retrievefinetuningjobrequest.md create mode 100755 docs/models/operations/retrievefinetuningjobresponse.md create mode 100755 docs/models/shared/createfinetuningjobrequest.md create mode 100755 docs/models/shared/createfinetuningjobrequesthyperparameters.md create mode 100755 
docs/models/shared/createfinetuningjobrequesthyperparametersnepochs1.md create mode 100755 docs/models/shared/createfinetuningjobrequestmodel2.md create mode 100755 docs/models/shared/finetuningjob.md create mode 100755 docs/models/shared/finetuningjobevent.md create mode 100755 docs/models/shared/finetuningjobeventlevel.md create mode 100755 docs/models/shared/finetuningjobhyperparameters.md create mode 100755 docs/models/shared/finetuningjobhyperparametersnepochs1.md create mode 100755 docs/models/shared/listfinetuningjobeventsresponse.md create mode 100755 docs/models/shared/listpaginatedfinetuningjobsresponse.md create mode 100755 src/sdk/models/operations/cancelfinetuningjob.ts create mode 100755 src/sdk/models/operations/createfinetuningjob.ts create mode 100755 src/sdk/models/operations/listfinetuningevents.ts create mode 100755 src/sdk/models/operations/listpaginatedfinetuningjobs.ts create mode 100755 src/sdk/models/operations/retrievefinetuningjob.ts create mode 100755 src/sdk/models/shared/createfinetuningjobrequest.ts create mode 100755 src/sdk/models/shared/finetuningjob.ts create mode 100755 src/sdk/models/shared/finetuningjobevent.ts create mode 100755 src/sdk/models/shared/listfinetuningjobeventsresponse.ts create mode 100755 src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts diff --git a/README.md b/README.md index 14eb585..e27544d 100755 --- a/README.md +++ b/README.md @@ -63,7 +63,9 @@ sdk.openAI.cancelFineTune({ ### [openAI](docs/sdks/openai/README.md) -* [cancelFineTune](docs/sdks/openai/README.md#cancelfinetune) - Immediately cancel a fine-tune job. +* [~~cancelFineTune~~](docs/sdks/openai/README.md#cancelfinetune) - Immediately cancel a fine-tune job. + :warning: **Deprecated** +* [cancelFineTuningJob](docs/sdks/openai/README.md#cancelfinetuningjob) - Immediately cancel a fine-tune job. * [createChatCompletion](docs/sdks/openai/README.md#createchatcompletion) - Creates a model response for the given chat conversation. 
* [createCompletion](docs/sdks/openai/README.md#createcompletion) - Creates a completion for the provided prompt and parameters. @@ -71,11 +73,17 @@ sdk.openAI.cancelFineTune({ * [createEmbedding](docs/sdks/openai/README.md#createembedding) - Creates an embedding vector representing the input text. * [createFile](docs/sdks/openai/README.md#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. -* [createFineTune](docs/sdks/openai/README.md#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. +* [~~createFineTune~~](docs/sdks/openai/README.md#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. -[Learn more about Fine-tuning](/docs/guides/fine-tuning) +[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + :warning: **Deprecated** +* [createFineTuningJob](docs/sdks/openai/README.md#createfinetuningjob) - Creates a job that fine-tunes a specified model from a given dataset. + +Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + +[Learn more about fine-tuning](/docs/guides/fine-tuning) * [createImage](docs/sdks/openai/README.md#createimage) - Creates an image given a prompt. * [createImageEdit](docs/sdks/openai/README.md#createimageedit) - Creates an edited or extended image given an original image and a prompt. @@ -84,18 +92,26 @@ Response includes details of the enqueued job including job status and the name * [createTranscription](docs/sdks/openai/README.md#createtranscription) - Transcribes audio into the input language. 
* [createTranslation](docs/sdks/openai/README.md#createtranslation) - Translates audio into English. * [deleteFile](docs/sdks/openai/README.md#deletefile) - Delete a file. -* [deleteModel](docs/sdks/openai/README.md#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization. +* [deleteModel](docs/sdks/openai/README.md#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. * [downloadFile](docs/sdks/openai/README.md#downloadfile) - Returns the contents of the specified file * [listFiles](docs/sdks/openai/README.md#listfiles) - Returns a list of files that belong to the user's organization. -* [listFineTuneEvents](docs/sdks/openai/README.md#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. - -* [listFineTunes](docs/sdks/openai/README.md#listfinetunes) - List your organization's fine-tuning jobs +* [~~listFineTuneEvents~~](docs/sdks/openai/README.md#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. + :warning: **Deprecated** +* [~~listFineTunes~~](docs/sdks/openai/README.md#listfinetunes) - List your organization's fine-tuning jobs + :warning: **Deprecated** +* [listFineTuningEvents](docs/sdks/openai/README.md#listfinetuningevents) - Get status updates for a fine-tuning job. * [listModels](docs/sdks/openai/README.md#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. +* [listPaginatedFineTuningJobs](docs/sdks/openai/README.md#listpaginatedfinetuningjobs) - List your organization's fine-tuning jobs + * [retrieveFile](docs/sdks/openai/README.md#retrievefile) - Returns information about a specific file. -* [retrieveFineTune](docs/sdks/openai/README.md#retrievefinetune) - Gets info about the fine-tune job. +* [~~retrieveFineTune~~](docs/sdks/openai/README.md#retrievefinetune) - Gets info about the fine-tune job. 
+ +[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + :warning: **Deprecated** +* [retrieveFineTuningJob](docs/sdks/openai/README.md#retrievefinetuningjob) - Get info about a fine-tuning job. -[Learn more about Fine-tuning](/docs/guides/fine-tuning) +[Learn more about fine-tuning](/docs/guides/fine-tuning) * [retrieveModel](docs/sdks/openai/README.md#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. diff --git a/RELEASES.md b/RELEASES.md index 8f565d7..007921f 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -510,4 +510,14 @@ Based on: ### Generated - [typescript v2.21.0] . ### Releases -- [NPM v2.21.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.21.0 - . \ No newline at end of file +- [NPM v2.21.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.21.0 - . + +## 2023-09-02 00:52:07 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.77.2 (2.93.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.21.1] . +### Releases +- [NPM v2.21.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.21.1 - . \ No newline at end of file diff --git a/docs/models/operations/cancelfinetuningjobrequest.md b/docs/models/operations/cancelfinetuningjobrequest.md new file mode 100755 index 0000000..07bc63b --- /dev/null +++ b/docs/models/operations/cancelfinetuningjobrequest.md @@ -0,0 +1,8 @@ +# CancelFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `fineTuningJobId` | *string* | :heavy_check_mark: | The ID of the fine-tuning job to cancel
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | \ No newline at end of file diff --git a/docs/models/operations/cancelfinetuningjobresponse.md b/docs/models/operations/cancelfinetuningjobresponse.md new file mode 100755 index 0000000..45c34c3 --- /dev/null +++ b/docs/models/operations/cancelfinetuningjobresponse.md @@ -0,0 +1,11 @@ +# CancelFineTuningJobResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `fineTuningJob` | [shared.FineTuningJob](../../models/shared/finetuningjob.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createfinetuningjobresponse.md b/docs/models/operations/createfinetuningjobresponse.md new file mode 100755 index 0000000..b191147 --- /dev/null +++ b/docs/models/operations/createfinetuningjobresponse.md @@ -0,0 +1,11 @@ +# CreateFineTuningJobResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `fineTuningJob` | [shared.FineTuningJob](../../models/shared/finetuningjob.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/operations/deletemodelrequest.md b/docs/models/operations/deletemodelrequest.md index ce2c4d5..42f0262 100755 --- a/docs/models/operations/deletemodelrequest.md +++ b/docs/models/operations/deletemodelrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | -| `model` | *string* | :heavy_check_mark: | The model to delete | curie:ft-acmeco-2021-03-03-21-44-20 | \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | +| `model` | *string* | :heavy_check_mark: | The model to delete | ft:gpt-3.5-turbo:acemeco:suffix:abc123 | \ No newline at end of file diff --git a/docs/models/operations/listfinetuningeventsrequest.md b/docs/models/operations/listfinetuningeventsrequest.md new file mode 100755 index 0000000..b226de2 --- /dev/null +++ b/docs/models/operations/listfinetuningeventsrequest.md @@ -0,0 +1,10 @@ +# ListFineTuningEventsRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `after` | *string* | :heavy_minus_sign: | Identifier for the last event from the previous pagination request. | | +| `fineTuningJobId` | *string* | :heavy_check_mark: | The ID of the fine-tuning job to get events for.
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | +| `limit` | *number* | :heavy_minus_sign: | Number of events to retrieve. | | \ No newline at end of file diff --git a/docs/models/operations/listfinetuningeventsresponse.md b/docs/models/operations/listfinetuningeventsresponse.md new file mode 100755 index 0000000..3407008 --- /dev/null +++ b/docs/models/operations/listfinetuningeventsresponse.md @@ -0,0 +1,11 @@ +# ListFineTuningEventsResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `listFineTuningJobEventsResponse` | [shared.ListFineTuningJobEventsResponse](../../models/shared/listfinetuningjobeventsresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/listpaginatedfinetuningjobsrequest.md b/docs/models/operations/listpaginatedfinetuningjobsrequest.md new file mode 100755 index 0000000..b1ca0a9 --- /dev/null +++ b/docs/models/operations/listpaginatedfinetuningjobsrequest.md @@ -0,0 +1,9 @@ +# ListPaginatedFineTuningJobsRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | +| `after` | *string* | :heavy_minus_sign: | 
Identifier for the last job from the previous pagination request. | +| `limit` | *number* | :heavy_minus_sign: | Number of fine-tuning jobs to retrieve. | \ No newline at end of file diff --git a/docs/models/operations/listpaginatedfinetuningjobsresponse.md b/docs/models/operations/listpaginatedfinetuningjobsresponse.md new file mode 100755 index 0000000..ea1f598 --- /dev/null +++ b/docs/models/operations/listpaginatedfinetuningjobsresponse.md @@ -0,0 +1,11 @@ +# ListPaginatedFineTuningJobsResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `listPaginatedFineTuningJobsResponse` | [shared.ListPaginatedFineTuningJobsResponse](../../models/shared/listpaginatedfinetuningjobsresponse.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/retrievefinetuningjobrequest.md b/docs/models/operations/retrievefinetuningjobrequest.md new file mode 100755 index 0000000..b509088 --- /dev/null +++ b/docs/models/operations/retrievefinetuningjobrequest.md @@ -0,0 +1,8 @@ +# RetrieveFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `fineTuningJobId` | *string* | :heavy_check_mark: 
| The ID of the fine-tuning job
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | \ No newline at end of file diff --git a/docs/models/operations/retrievefinetuningjobresponse.md b/docs/models/operations/retrievefinetuningjobresponse.md new file mode 100755 index 0000000..df145b5 --- /dev/null +++ b/docs/models/operations/retrievefinetuningjobresponse.md @@ -0,0 +1,11 @@ +# RetrieveFineTuningJobResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `fineTuningJob` | [shared.FineTuningJob](../../models/shared/finetuningjob.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/retrievemodelrequest.md b/docs/models/operations/retrievemodelrequest.md index b52c4f8..8a8dfda 100755 --- a/docs/models/operations/retrievemodelrequest.md +++ b/docs/models/operations/retrievemodelrequest.md @@ -5,4 +5,4 @@ | Field | Type | Required | Description | Example | | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | -| `model` | *string* | :heavy_check_mark: | The ID of the model to use for this request | text-davinci-001 | \ No newline at end of file +| `model` | *string* | :heavy_check_mark: | The ID of the model to use for this request | gpt-3.5-turbo | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequest.md b/docs/models/shared/createchatcompletionrequest.md index 12e12c1..b616f9a 100755 --- 
a/docs/models/shared/createchatcompletionrequest.md +++ b/docs/models/shared/createchatcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `frequencyPenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | +| `frequencyPenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
| | | `functionCall` | *any* | :heavy_minus_sign: | Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. | | | `functions` | [ChatCompletionFunctions](../../models/shared/chatcompletionfunctions.md)[] | :heavy_minus_sign: | A list of functions the model may generate JSON inputs for. | | | `logitBias` | Record | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
| | @@ -13,7 +13,7 @@ | `messages` | [ChatCompletionRequestMessage](../../models/shared/chatcompletionrequestmessage.md)[] | :heavy_check_mark: | A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | | | `model` | *any* | :heavy_check_mark: | ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. | | | `n` | *number* | :heavy_minus_sign: | How many chat completion choices to generate for each input message. | 1 | -| `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | +| `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
| | | `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens.
| | | `stream` | *boolean* | :heavy_minus_sign: | If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
| | | `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

We generally recommend altering this or `top_p` but not both.
| 1 | diff --git a/docs/models/shared/createchatcompletionresponse.md b/docs/models/shared/createchatcompletionresponse.md index ac6bda0..8048de9 100755 --- a/docs/models/shared/createchatcompletionresponse.md +++ b/docs/models/shared/createchatcompletionresponse.md @@ -8,7 +8,7 @@ Represents a chat completion response returned by model, based on the provided i | Field | Type | Required | Description | | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | | `choices` | [CreateChatCompletionResponseChoices](../../models/shared/createchatcompletionresponsechoices.md)[] | :heavy_check_mark: | A list of chat completion choices. Can be more than one if `n` is greater than 1. | -| `created` | *number* | :heavy_check_mark: | A unix timestamp of when the chat completion was created. | +| `created` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) of when the chat completion was created. | | `id` | *string* | :heavy_check_mark: | A unique identifier for the chat completion. | | `model` | *string* | :heavy_check_mark: | The model used for the chat completion. | | `object` | *string* | :heavy_check_mark: | The object type, which is always `chat.completion`. 
| diff --git a/docs/models/shared/createcompletionrequest.md b/docs/models/shared/createcompletionrequest.md index 0fb659a..9080a5e 100755 --- a/docs/models/shared/createcompletionrequest.md +++ b/docs/models/shared/createcompletionrequest.md @@ -7,13 +7,13 @@ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | `bestOf` | *number* | :heavy_minus_sign: | Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.

When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.

**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
| | | `echo` | *boolean* | :heavy_minus_sign: | Echo back the prompt in addition to the completion
| | -| `frequencyPenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | +| `frequencyPenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
| | | `logitBias` | Record | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.

As an example, you can pass `{"50256": -100}` to prevent the <\|endoftext\|> token from being generated.
| | | `logprobs` | *number* | :heavy_minus_sign: | Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5.
| | | `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the completion.

The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| 16 | | `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| | | `n` | *number* | :heavy_minus_sign: | How many completions to generate for each prompt.

**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
| 1 | -| `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
| | +| `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
| | | `prompt` | *any* | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.

Note that <\|endoftext\|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
| | | `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
| | | `stream` | *boolean* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
| | diff --git a/docs/models/shared/createcompletionrequestmodel2.md b/docs/models/shared/createcompletionrequestmodel2.md index 8f81c3e..1702d82 100755 --- a/docs/models/shared/createcompletionrequestmodel2.md +++ b/docs/models/shared/createcompletionrequestmodel2.md @@ -8,6 +8,8 @@ ID of the model to use. You can use the [List models](/docs/api-reference/models | Name | Value | | ---------------- | ---------------- | +| `Babbage002` | babbage-002 | +| `Davinci002` | davinci-002 | | `TextDavinci003` | text-davinci-003 | | `TextDavinci002` | text-davinci-002 | | `TextDavinci001` | text-davinci-001 | diff --git a/docs/models/shared/createcompletionresponse.md b/docs/models/shared/createcompletionresponse.md index 643c404..a392723 100755 --- a/docs/models/shared/createcompletionresponse.md +++ b/docs/models/shared/createcompletionresponse.md @@ -9,7 +9,7 @@ Represents a completion response from the API. Note: both the streamed and non-s | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | | `choices` | [CreateCompletionResponseChoices](../../models/shared/createcompletionresponsechoices.md)[] | :heavy_check_mark: | The list of completion choices the model generated for the input prompt. | -| `created` | *number* | :heavy_check_mark: | The Unix timestamp of when the completion was created. | +| `created` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) of when the completion was created. | | `id` | *string* | :heavy_check_mark: | A unique identifier for the completion. | | `model` | *string* | :heavy_check_mark: | The model used for completion. 
| | `object` | *string* | :heavy_check_mark: | The object type, which is always "text_completion" | diff --git a/docs/models/shared/createeditresponse.md b/docs/models/shared/createeditresponse.md index 2b3d57f..22c1e0c 100755 --- a/docs/models/shared/createeditresponse.md +++ b/docs/models/shared/createeditresponse.md @@ -1,13 +1,15 @@ -# CreateEditResponse +# ~~CreateEditResponse~~ OK +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. + ## Fields | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | | `choices` | [CreateEditResponseChoices](../../models/shared/createeditresponsechoices.md)[] | :heavy_check_mark: | A list of edit choices. Can be more than one if `n` is greater than 1. | -| `created` | *number* | :heavy_check_mark: | A unix timestamp of when the edit was created. | +| `created` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) of when the edit was created. | | `object` | *string* | :heavy_check_mark: | The object type, which is always `edit`. | | `usage` | [CompletionUsage](../../models/shared/completionusage.md) | :heavy_check_mark: | Usage statistics for the completion request. 
| \ No newline at end of file diff --git a/docs/models/shared/createfilerequest.md b/docs/models/shared/createfilerequest.md index 4d20d62..e9f4d06 100755 --- a/docs/models/shared/createfilerequest.md +++ b/docs/models/shared/createfilerequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `file` | [CreateFileRequestFile](../../models/shared/createfilerequestfile.md) | :heavy_check_mark: | Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded.

If the `purpose` is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
| -| `purpose` | *string* | :heavy_check_mark: | The intended purpose of the uploaded documents.

Use "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
| \ No newline at end of file +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [CreateFileRequestFile](../../models/shared/createfilerequestfile.md) | :heavy_check_mark: | Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded.

If the `purpose` is set to "fine-tune", the file will be used for fine-tuning.
| +| `purpose` | *string* | :heavy_check_mark: | The intended purpose of the uploaded documents.

Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file.
| \ No newline at end of file diff --git a/docs/models/shared/createfinetunerequest.md b/docs/models/shared/createfinetunerequest.md index aaa9031..7a8838e 100755 --- a/docs/models/shared/createfinetunerequest.md +++ b/docs/models/shared/createfinetunerequest.md @@ -3,17 +3,17 @@ ## Fields -| Field | Type | Required | Description | Example | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `batchSize` | *number* | 
:heavy_minus_sign: | The batch size to use for training. The batch size is the number of
training examples used to train a single forward and backward pass.

By default, the batch size will be dynamically configured to be
~0.2% of the number of examples in the training set, capped at 256 -
in general, we've found that larger batch sizes tend to work better
for larger datasets.
| | -| `classificationBetas` | *number*[] | :heavy_minus_sign: | If this is provided, we calculate F-beta scores at the specified
beta values. The F-beta score is a generalization of F-1 score.
This is only used for binary classification.

With a beta of 1 (i.e. the F-1 score), precision and recall are
given the same weight. A larger beta score puts more weight on
recall and less on precision. A smaller beta score puts more weight
on precision and less on recall.
| | -| `classificationNClasses` | *number* | :heavy_minus_sign: | The number of classes in a classification task.

This parameter is required for multiclass classification.
| | -| `classificationPositiveClass` | *string* | :heavy_minus_sign: | The positive class in binary classification.

This parameter is needed to generate precision, recall, and F1
metrics when doing binary classification.
| | -| `computeClassificationMetrics` | *boolean* | :heavy_minus_sign: | If set, we calculate classification-specific metrics such as accuracy
and F-1 score using the validation set at the end of every epoch.
These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model).

In order to compute classification metrics, you must provide a
`validation_file`. Additionally, you must
specify `classification_n_classes` for multiclass classification or
`classification_positive_class` for binary classification.
| | -| `learningRateMultiplier` | *number* | :heavy_minus_sign: | The learning rate multiplier to use for training.
The fine-tuning learning rate is the original learning rate used for
pretraining multiplied by this value.

By default, the learning rate multiplier is the 0.05, 0.1, or 0.2
depending on final `batch_size` (larger learning rates tend to
perform better with larger batch sizes). We recommend experimenting
with values in the range 0.02 to 0.2 to see what produces the best
results.
| | -| `model` | *any* | :heavy_minus_sign: | The name of the base model to fine-tune. You can select one of "ada",
"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21.
To learn more about these models, see the
[Models](https://platform.openai.com/docs/models) documentation.
| | -| `nEpochs` | *number* | :heavy_minus_sign: | The number of epochs to train the model for. An epoch refers to one
full cycle through the training dataset.
| | -| `promptLossWeight` | *number* | :heavy_minus_sign: | The weight to use for loss on the prompt tokens. This controls how
much the model tries to learn to generate the prompt (as compared
to the completion which always has a weight of 1.0), and can add
a stabilizing effect to training when completions are short.

If prompts are extremely long (relative to completions), it may make
sense to reduce this weight so as to avoid over-prioritizing
learning the prompt.
| | -| `suffix` | *string* | :heavy_minus_sign: | A string of up to 40 characters that will be added to your fine-tuned model name.

For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
| | -| `trainingFile` | *string* | :heavy_check_mark: | The ID of an uploaded file that contains training data.

See [upload file](/docs/api-reference/files/upload) for how to upload a file.

Your dataset must be formatted as a JSONL file, where each training
example is a JSON object with the keys "prompt" and "completion".
Additionally, you must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
| file-ajSREls59WBbvgSzJSVWxMCB | -| `validationFile` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains validation data.

If you provide this file, the data is used to generate validation
metrics periodically during fine-tuning. These metrics can be viewed in
the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model).
Your train and validation data should be mutually exclusive.

Your dataset must be formatted as a JSONL file, where each validation
example is a JSON object with the keys "prompt" and "completion".
Additionally, you must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
| file-XjSREls59WBbvgSzJSVWxMCa | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `batchSize` | *number* | :heavy_minus_sign: | The batch size to use for training. The batch size is the number of
training examples used to train a single forward and backward pass.

By default, the batch size will be dynamically configured to be
~0.2% of the number of examples in the training set, capped at 256 -
in general, we've found that larger batch sizes tend to work better
for larger datasets.
| | +| `classificationBetas` | *number*[] | :heavy_minus_sign: | If this is provided, we calculate F-beta scores at the specified
beta values. The F-beta score is a generalization of F-1 score.
This is only used for binary classification.

With a beta of 1 (i.e. the F-1 score), precision and recall are
given the same weight. A larger beta score puts more weight on
recall and less on precision. A smaller beta score puts more weight
on precision and less on recall.
| | +| `classificationNClasses` | *number* | :heavy_minus_sign: | The number of classes in a classification task.

This parameter is required for multiclass classification.
| | +| `classificationPositiveClass` | *string* | :heavy_minus_sign: | The positive class in binary classification.

This parameter is needed to generate precision, recall, and F1
metrics when doing binary classification.
| | +| `computeClassificationMetrics` | *boolean* | :heavy_minus_sign: | If set, we calculate classification-specific metrics such as accuracy
and F-1 score using the validation set at the end of every epoch.
These metrics can be viewed in the [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).

In order to compute classification metrics, you must provide a
`validation_file`. Additionally, you must
specify `classification_n_classes` for multiclass classification or
`classification_positive_class` for binary classification.
| | +| `learningRateMultiplier` | *number* | :heavy_minus_sign: | The learning rate multiplier to use for training.
The fine-tuning learning rate is the original learning rate used for
pretraining multiplied by this value.

By default, the learning rate multiplier is 0.05, 0.1, or 0.2
depending on final `batch_size` (larger learning rates tend to
perform better with larger batch sizes). We recommend experimenting
with values in the range 0.02 to 0.2 to see what produces the best
results.
| | +| `model` | *any* | :heavy_minus_sign: | The name of the base model to fine-tune. You can select one of "ada",
"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22.
To learn more about these models, see the
[Models](/docs/models) documentation.
| | +| `nEpochs` | *number* | :heavy_minus_sign: | The number of epochs to train the model for. An epoch refers to one
full cycle through the training dataset.
| | +| `promptLossWeight` | *number* | :heavy_minus_sign: | The weight to use for loss on the prompt tokens. This controls how
much the model tries to learn to generate the prompt (as compared
to the completion which always has a weight of 1.0), and can add
a stabilizing effect to training when completions are short.

If prompts are extremely long (relative to completions), it may make
sense to reduce this weight so as to avoid over-prioritizing
learning the prompt.
| | +| `suffix` | *string* | :heavy_minus_sign: | A string of up to 40 characters that will be added to your fine-tuned model name.

For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
| | +| `trainingFile` | *string* | :heavy_check_mark: | The ID of an uploaded file that contains training data.

See [upload file](/docs/api-reference/files/upload) for how to upload a file.

Your dataset must be formatted as a JSONL file, where each training
example is a JSON object with the keys "prompt" and "completion".
Additionally, you must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details.
| file-abc123 | +| `validationFile` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains validation data.

If you provide this file, the data is used to generate validation
metrics periodically during fine-tuning. These metrics can be viewed in
the [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).
Your train and validation data should be mutually exclusive.

Your dataset must be formatted as a JSONL file, where each validation
example is a JSON object with the keys "prompt" and "completion".
Additionally, you must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details.
| file-abc123 | \ No newline at end of file diff --git a/docs/models/shared/createfinetunerequestmodel2.md b/docs/models/shared/createfinetunerequestmodel2.md index 394d64f..61cae6a 100755 --- a/docs/models/shared/createfinetunerequestmodel2.md +++ b/docs/models/shared/createfinetunerequestmodel2.md @@ -1,9 +1,9 @@ # CreateFineTuneRequestModel2 The name of the base model to fine-tune. You can select one of "ada", -"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. +"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. To learn more about these models, see the -[Models](https://platform.openai.com/docs/models) documentation. +[Models](/docs/models) documentation. diff --git a/docs/models/shared/createfinetuningjobrequest.md b/docs/models/shared/createfinetuningjobrequest.md new file mode 100755 index 0000000..85825b5 --- /dev/null +++ b/docs/models/shared/createfinetuningjobrequest.md @@ -0,0 +1,12 @@ +# CreateFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `hyperparameters` | 
[CreateFineTuningJobRequestHyperparameters](../../models/shared/createfinetuningjobrequesthyperparameters.md) | :heavy_minus_sign: | The hyperparameters used for the fine-tuning job. | | +| `model` | *any* | :heavy_check_mark: | The name of the model to fine-tune. You can select one of the
[supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
| | +| `suffix` | *string* | :heavy_minus_sign: | A string of up to 40 characters that will be added to your fine-tuned model name.

For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
| | +| `trainingFile` | *string* | :heavy_check_mark: | The ID of an uploaded file that contains training data.

See [upload file](/docs/api-reference/files/upload) for how to upload a file.

Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
| file-abc123 | +| `validationFile` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains validation data.

If you provide this file, the data is used to generate validation
metrics periodically during fine-tuning. These metrics can be viewed in
the fine-tuning results file.
The same data should not be present in both train and validation files.

Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
| file-abc123 | \ No newline at end of file diff --git a/docs/models/shared/createfinetuningjobrequesthyperparameters.md b/docs/models/shared/createfinetuningjobrequesthyperparameters.md new file mode 100755 index 0000000..c9786c6 --- /dev/null +++ b/docs/models/shared/createfinetuningjobrequesthyperparameters.md @@ -0,0 +1,10 @@ +# CreateFineTuningJobRequestHyperparameters + +The hyperparameters used for the fine-tuning job. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `nEpochs` | *any* | :heavy_minus_sign: | The number of epochs to train the model for. An epoch refers to one
full cycle through the training dataset.
| \ No newline at end of file diff --git a/docs/models/shared/createfinetuningjobrequesthyperparametersnepochs1.md b/docs/models/shared/createfinetuningjobrequesthyperparametersnepochs1.md new file mode 100755 index 0000000..643ab74 --- /dev/null +++ b/docs/models/shared/createfinetuningjobrequesthyperparametersnepochs1.md @@ -0,0 +1,12 @@ +# CreateFineTuningJobRequestHyperparametersNEpochs1 + +The number of epochs to train the model for. An epoch refers to one +full cycle through the training dataset. + + + +## Values + +| Name | Value | +| ------ | ------ | +| `Auto` | auto | \ No newline at end of file diff --git a/docs/models/shared/createfinetuningjobrequestmodel2.md b/docs/models/shared/createfinetuningjobrequestmodel2.md new file mode 100755 index 0000000..627ccfb --- /dev/null +++ b/docs/models/shared/createfinetuningjobrequestmodel2.md @@ -0,0 +1,14 @@ +# CreateFineTuningJobRequestModel2 + +The name of the model to fine-tune. You can select one of the +[supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `Babbage002` | babbage-002 | +| `Davinci002` | davinci-002 | +| `Gpt35Turbo` | gpt-3.5-turbo | \ No newline at end of file diff --git a/docs/models/shared/finetune.md b/docs/models/shared/finetune.md index 964eadd..4f7452d 100755 --- a/docs/models/shared/finetune.md +++ b/docs/models/shared/finetune.md @@ -1,23 +1,25 @@ -# FineTune +# ~~FineTune~~ -The `FineTune` object represents a fine-tuning job that has been created through the API. +The `FineTune` object represents a legacy fine-tune job that has been created through the API. +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. 
+ ## Fields -| Field | Type | Required | Description | -| ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | -| `createdAt` | *number* | :heavy_check_mark: | The unix timestamp for when the fine-tuning job was created. | -| `events` | [FineTuneEvent](../../models/shared/finetuneevent.md)[] | :heavy_minus_sign: | The list of events that have been observed in the lifecycle of the FineTune job. | -| `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. | -| `hyperparams` | [FineTuneHyperparams](../../models/shared/finetunehyperparams.md) | :heavy_check_mark: | The hyperparameters used for the fine-tuning job. See the [Fine-tuning Guide](/docs/guides/fine-tuning/hyperparameters) for more details. | -| `id` | *string* | :heavy_check_mark: | The object identifier, which can be referenced in the API endpoints. | -| `model` | *string* | :heavy_check_mark: | The base model that is being fine-tuned. | -| `object` | *string* | :heavy_check_mark: | The object type, which is always "fine-tune". | -| `organizationId` | *string* | :heavy_check_mark: | The organization that owns the fine-tuning job. | -| `resultFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The compiled results files for the fine-tuning job. | -| `status` | *string* | :heavy_check_mark: | The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. 
| -| `trainingFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The list of files used for training. | -| `updatedAt` | *number* | :heavy_check_mark: | The unix timestamp for when the fine-tuning job was last updated. | -| `validationFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The list of files used for validation. | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | +| `createdAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was created. | +| `events` | [FineTuneEvent](../../models/shared/finetuneevent.md)[] | :heavy_minus_sign: | The list of events that have been observed in the lifecycle of the FineTune job. | +| `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. | +| `hyperparams` | [FineTuneHyperparams](../../models/shared/finetunehyperparams.md) | :heavy_check_mark: | The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. | +| `id` | *string* | :heavy_check_mark: | The object identifier, which can be referenced in the API endpoints. | +| `model` | *string* | :heavy_check_mark: | The base model that is being fine-tuned. 
| +| `object` | *string* | :heavy_check_mark: | The object type, which is always "fine-tune". | +| `organizationId` | *string* | :heavy_check_mark: | The organization that owns the fine-tuning job. | +| `resultFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The compiled results files for the fine-tuning job. | +| `status` | *string* | :heavy_check_mark: | The current status of the fine-tuning job, which can be either `created`, `running`, `succeeded`, `failed`, or `cancelled`. | +| `trainingFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The list of files used for training. | +| `updatedAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was last updated. | +| `validationFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The list of files used for validation. | \ No newline at end of file diff --git a/docs/models/shared/finetunehyperparams.md b/docs/models/shared/finetunehyperparams.md index f06d277..81ebacf 100755 --- a/docs/models/shared/finetunehyperparams.md +++ b/docs/models/shared/finetunehyperparams.md @@ -1,6 +1,6 @@ # FineTuneHyperparams -The hyperparameters used for the fine-tuning job. See the [Fine-tuning Guide](/docs/guides/fine-tuning/hyperparameters) for more details. +The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. ## Fields diff --git a/docs/models/shared/finetuningjob.md b/docs/models/shared/finetuningjob.md new file mode 100755 index 0000000..fe5b4f1 --- /dev/null +++ b/docs/models/shared/finetuningjob.md @@ -0,0 +1,23 @@ +# FineTuningJob + +The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. 
+ + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | +| `createdAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was created. | +| `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. | +| `finishedAt` | *number* | :heavy_minus_sign: | The Unix timestamp (in seconds) for when the fine-tuning job was finished. | +| `hyperparameters` | [FineTuningJobHyperparameters](../../models/shared/finetuningjobhyperparameters.md) | :heavy_check_mark: | The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. | +| `id` | *string* | :heavy_check_mark: | The object identifier, which can be referenced in the API endpoints. | +| `model` | *string* | :heavy_check_mark: | The base model that is being fine-tuned. | +| `object` | *string* | :heavy_check_mark: | The object type, which is always "fine_tuning.job". | +| `organizationId` | *string* | :heavy_check_mark: | The organization that owns the fine-tuning job. | +| `resultFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The compiled results files for the fine-tuning job. | +| `status` | *string* | :heavy_check_mark: | The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. 
| +| `trainedTokens` | *number* | :heavy_check_mark: | The total number of billable tokens processed by this fine tuning job. | +| `trainingFile` | *string* | :heavy_check_mark: | The file ID used for training. | +| `validationFile` | *string* | :heavy_check_mark: | The file ID used for validation. | \ No newline at end of file diff --git a/docs/models/shared/finetuningjobevent.md b/docs/models/shared/finetuningjobevent.md new file mode 100755 index 0000000..d899f9b --- /dev/null +++ b/docs/models/shared/finetuningjobevent.md @@ -0,0 +1,11 @@ +# FineTuningJobEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | +| `createdAt` | *number* | :heavy_check_mark: | N/A | +| `level` | [FineTuningJobEventLevel](../../models/shared/finetuningjobeventlevel.md) | :heavy_check_mark: | N/A | +| `message` | *string* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/finetuningjobeventlevel.md b/docs/models/shared/finetuningjobeventlevel.md new file mode 100755 index 0000000..c2baf60 --- /dev/null +++ b/docs/models/shared/finetuningjobeventlevel.md @@ -0,0 +1,10 @@ +# FineTuningJobEventLevel + + +## Values + +| Name | Value | +| ------- | ------- | +| `Info` | info | +| `Warn` | warn | +| `Error` | error | \ No newline at end of file diff --git a/docs/models/shared/finetuningjobhyperparameters.md b/docs/models/shared/finetuningjobhyperparameters.md new file mode 100755 index 0000000..a81da83 --- /dev/null +++ b/docs/models/shared/finetuningjobhyperparameters.md @@ -0,0 +1,10 @@ +# FineTuningJobHyperparameters + +The hyperparameters used for the fine-tuning job. 
See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `nEpochs` | *any* | :heavy_minus_sign: | The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
"Auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. | \ No newline at end of file diff --git a/docs/models/shared/finetuningjobhyperparametersnepochs1.md b/docs/models/shared/finetuningjobhyperparametersnepochs1.md new file mode 100755 index 0000000..3525a69 --- /dev/null +++ b/docs/models/shared/finetuningjobhyperparametersnepochs1.md @@ -0,0 +1,11 @@ +# FineTuningJobHyperparametersNEpochs1 + +The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. +"Auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + + +## Values + +| Name | Value | +| ------ | ------ | +| `Auto` | auto | \ No newline at end of file diff --git a/docs/models/shared/listfinetuningjobeventsresponse.md b/docs/models/shared/listfinetuningjobeventsresponse.md new file mode 100755 index 0000000..77787a5 --- /dev/null +++ b/docs/models/shared/listfinetuningjobeventsresponse.md @@ -0,0 +1,11 @@ +# ListFineTuningJobEventsResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | +| `data` | [FineTuningJobEvent](../../models/shared/finetuningjobevent.md)[] | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/listpaginatedfinetuningjobsresponse.md b/docs/models/shared/listpaginatedfinetuningjobsresponse.md new file mode 100755 index 0000000..b37d45e --- /dev/null +++ b/docs/models/shared/listpaginatedfinetuningjobsresponse.md @@ -0,0 +1,12 @@ +# 
ListPaginatedFineTuningJobsResponse + +OK + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `data` | [FineTuningJob](../../models/shared/finetuningjob.md)[] | :heavy_check_mark: | N/A | +| `hasMore` | *boolean* | :heavy_check_mark: | N/A | +| `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/model.md b/docs/models/shared/model.md index 21c0bd5..bfaeee8 100755 --- a/docs/models/shared/model.md +++ b/docs/models/shared/model.md @@ -7,7 +7,7 @@ Describes an OpenAI model offering that can be used with the API. | Field | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `created` | *number* | :heavy_check_mark: | The date and time when the model was created. | +| `created` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) when the model was created. | | `id` | *string* | :heavy_check_mark: | The model identifier, which can be referenced in the API endpoints. | | `object` | *string* | :heavy_check_mark: | The object type, which is always "model". | | `ownedBy` | *string* | :heavy_check_mark: | The organization that owns the model. | \ No newline at end of file diff --git a/docs/models/shared/openaifile.md b/docs/models/shared/openaifile.md index e08e546..0e1fb8f 100755 --- a/docs/models/shared/openaifile.md +++ b/docs/models/shared/openaifile.md @@ -9,7 +9,7 @@ The `File` object represents a document that has been uploaded to OpenAI. 
| Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | | `bytes` | *number* | :heavy_check_mark: | The size of the file in bytes. | -| `createdAt` | *number* | :heavy_check_mark: | The unix timestamp for when the file was created. | +| `createdAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the file was created. | | `filename` | *string* | :heavy_check_mark: | The name of the file. | | `id` | *string* | :heavy_check_mark: | The file identifier, which can be referenced in the API endpoints. | | `object` | *string* | :heavy_check_mark: | The object type, which is always "file". | diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index 8d8ac54..ad63d9d 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -6,7 +6,9 @@ The OpenAI REST API ### Available Operations -* [cancelFineTune](#cancelfinetune) - Immediately cancel a fine-tune job. +* [~~cancelFineTune~~](#cancelfinetune) - Immediately cancel a fine-tune job. + :warning: **Deprecated** +* [cancelFineTuningJob](#cancelfinetuningjob) - Immediately cancel a fine-tune job. * [createChatCompletion](#createchatcompletion) - Creates a model response for the given chat conversation. * [createCompletion](#createcompletion) - Creates a completion for the provided prompt and parameters. 
@@ -14,11 +16,17 @@ The OpenAI REST API * [createEmbedding](#createembedding) - Creates an embedding vector representing the input text. * [createFile](#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. -* [createFineTune](#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. +* [~~createFineTune~~](#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. -[Learn more about Fine-tuning](/docs/guides/fine-tuning) +[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + :warning: **Deprecated** +* [createFineTuningJob](#createfinetuningjob) - Creates a job that fine-tunes a specified model from a given dataset. + +Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + +[Learn more about fine-tuning](/docs/guides/fine-tuning) * [createImage](#createimage) - Creates an image given a prompt. * [createImageEdit](#createimageedit) - Creates an edited or extended image given an original image and a prompt. @@ -27,26 +35,36 @@ Response includes details of the enqueued job including job status and the name * [createTranscription](#createtranscription) - Transcribes audio into the input language. * [createTranslation](#createtranslation) - Translates audio into English. * [deleteFile](#deletefile) - Delete a file. -* [deleteModel](#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization. +* [deleteModel](#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. 
* [downloadFile](#downloadfile) - Returns the contents of the specified file * [listFiles](#listfiles) - Returns a list of files that belong to the user's organization. -* [listFineTuneEvents](#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. - -* [listFineTunes](#listfinetunes) - List your organization's fine-tuning jobs +* [~~listFineTuneEvents~~](#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. + :warning: **Deprecated** +* [~~listFineTunes~~](#listfinetunes) - List your organization's fine-tuning jobs + :warning: **Deprecated** +* [listFineTuningEvents](#listfinetuningevents) - Get status updates for a fine-tuning job. * [listModels](#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. +* [listPaginatedFineTuningJobs](#listpaginatedfinetuningjobs) - List your organization's fine-tuning jobs + * [retrieveFile](#retrievefile) - Returns information about a specific file. -* [retrieveFineTune](#retrievefinetune) - Gets info about the fine-tune job. +* [~~retrieveFineTune~~](#retrievefinetune) - Gets info about the fine-tune job. -[Learn more about Fine-tuning](/docs/guides/fine-tuning) +[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + :warning: **Deprecated** +* [retrieveFineTuningJob](#retrievefinetuningjob) - Get info about a fine-tuning job. + +[Learn more about fine-tuning](/docs/guides/fine-tuning) * [retrieveModel](#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. -## cancelFineTune +## ~~cancelFineTune~~ Immediately cancel a fine-tune job. +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. 
+ ### Example Usage ```typescript @@ -77,6 +95,41 @@ sdk.openAI.cancelFineTune({ **Promise<[operations.CancelFineTuneResponse](../../models/operations/cancelfinetuneresponse.md)>** +## cancelFineTuningJob + +Immediately cancel a fine-tune job. + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CancelFineTuningJobResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.cancelFineTuningJob({ + fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", +}).then((res: CancelFineTuningJobResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | +| `request` | [operations.CancelFineTuningJobRequest](../../models/operations/cancelfinetuningjobrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CancelFineTuningJobResponse](../../models/operations/cancelfinetuningjobresponse.md)>** + + ## createChatCompletion Creates a model response for the given chat conversation. @@ -373,15 +426,17 @@ sdk.openAI.createFile({ **Promise<[operations.CreateFileResponse](../../models/operations/createfileresponse.md)>** -## createFineTune +## ~~createFineTune~~ Creates a job that fine-tunes a specified model from a given dataset. 
Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. -[Learn more about Fine-tuning](/docs/guides/fine-tuning) +[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. + ### Example Usage ```typescript @@ -405,8 +460,8 @@ sdk.openAI.createFineTune({ nEpochs: 196582, promptLossWeight: 9495.72, suffix: "ipsam", - trainingFile: "file-ajSREls59WBbvgSzJSVWxMCB", - validationFile: "file-XjSREls59WBbvgSzJSVWxMCa", + trainingFile: "file-abc123", + validationFile: "file-abc123", }).then((res: CreateFineTuneResponse) => { if (res.statusCode == 200) { // handle response @@ -427,6 +482,55 @@ sdk.openAI.createFineTune({ **Promise<[operations.CreateFineTuneResponse](../../models/operations/createfinetuneresponse.md)>** +## createFineTuningJob + +Creates a job that fine-tunes a specified model from a given dataset. + +Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
+ +[Learn more about fine-tuning](/docs/guides/fine-tuning) + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateFineTuningJobResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; +import { + CreateFineTuningJobRequestHyperparametersNEpochs1, + CreateFineTuningJobRequestModel2, +} from "@speakeasy-api/openai/dist/sdk/models/shared"; + +const sdk = new Gpt(); + +sdk.openAI.createFineTuningJob({ + hyperparameters: { + nEpochs: 820994, + }, + model: "gpt-3.5-turbo", + suffix: "quasi", + trainingFile: "file-abc123", + validationFile: "file-abc123", +}).then((res: CreateFineTuningJobResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `request` | [shared.CreateFineTuningJobRequest](../../models/shared/createfinetuningjobrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateFineTuningJobResponse](../../models/operations/createfinetuningjobresponse.md)>** + + ## createImage Creates an image given a prompt. 
@@ -481,11 +585,11 @@ const sdk = new Gpt(); sdk.openAI.createImageEdit({ image: { - content: "id".encode(), - image: "possimus", + content: "error".encode(), + image: "temporibus", }, mask: { - content: "aut".encode(), + content: "laborum".encode(), mask: "quasi", }, n: 1, @@ -528,8 +632,8 @@ const sdk = new Gpt(); sdk.openAI.createImageVariation({ image: { - content: "error".encode(), - image: "temporibus", + content: "reiciendis".encode(), + image: "voluptatibus", }, n: 1, responseFormat: CreateImageVariationRequestResponseFormat.Url, @@ -571,6 +675,7 @@ const sdk = new Gpt(); sdk.openAI.createModeration({ input: [ "I want to kill them.", + "I want to kill them.", ], model: CreateModerationRequestModel2.TextModerationStable, }).then((res: CreateModerationResponse) => { @@ -609,13 +714,13 @@ const sdk = new Gpt(); sdk.openAI.createTranscription({ file: { content: "voluptatibus".encode(), - file: "vero", + file: "ipsa", }, - language: "nihil", - model: CreateTranscriptionRequestModel2.Whisper1, - prompt: "voluptatibus", + language: "omnis", + model: "whisper-1", + prompt: "cum", responseFormat: CreateTranscriptionRequestResponseFormat.Json, - temperature: 6048.46, + temperature: 391.87, }).then((res: CreateTranscriptionResponse) => { if (res.statusCode == 200) { // handle response @@ -651,13 +756,13 @@ const sdk = new Gpt(); sdk.openAI.createTranslation({ file: { - content: "voluptate".encode(), - file: "cum", + content: "reprehenderit".encode(), + file: "ut", }, - model: "whisper-1", - prompt: "doloremque", - responseFormat: "reprehenderit", - temperature: 2828.07, + model: CreateTranslationRequestModel2.Whisper1, + prompt: "dicta", + responseFormat: "corporis", + temperature: 2961.4, }).then((res: CreateTranslationResponse) => { if (res.statusCode == 200) { // handle response @@ -691,7 +796,7 @@ import { DeleteFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operat const sdk = new Gpt(); sdk.openAI.deleteFile({ - fileId: "maiores", + fileId: "iusto", 
}).then((res: DeleteFileResponse) => { if (res.statusCode == 200) { // handle response @@ -714,7 +819,7 @@ sdk.openAI.deleteFile({ ## deleteModel -Delete a fine-tuned model. You must have the Owner role in your organization. +Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. ### Example Usage @@ -725,7 +830,7 @@ import { DeleteModelResponse } from "@speakeasy-api/openai/dist/sdk/models/opera const sdk = new Gpt(); sdk.openAI.deleteModel({ - model: "curie:ft-acmeco-2021-03-03-21-44-20", + model: "ft:gpt-3.5-turbo:acemeco:suffix:abc123", }).then((res: DeleteModelResponse) => { if (res.statusCode == 200) { // handle response @@ -811,11 +916,13 @@ sdk.openAI.listFiles().then((res: ListFilesResponse) => { **Promise<[operations.ListFilesResponse](../../models/operations/listfilesresponse.md)>** -## listFineTuneEvents +## ~~listFineTuneEvents~~ Get fine-grained status updates for a fine-tune job. +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. + ### Example Usage ```typescript @@ -847,11 +954,13 @@ sdk.openAI.listFineTuneEvents({ **Promise<[operations.ListFineTuneEventsResponse](../../models/operations/listfinetuneeventsresponse.md)>** -## listFineTunes +## ~~listFineTunes~~ List your organization's fine-tuning jobs +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. + ### Example Usage ```typescript @@ -879,6 +988,43 @@ sdk.openAI.listFineTunes().then((res: ListFineTunesResponse) => { **Promise<[operations.ListFineTunesResponse](../../models/operations/listfinetunesresponse.md)>** +## listFineTuningEvents + +Get status updates for a fine-tuning job. 
+ + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListFineTuningEventsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listFineTuningEvents({ + after: "harum", + fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + limit: 317983, +}).then((res: ListFineTuningEventsResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `request` | [operations.ListFineTuningEventsRequest](../../models/operations/listfinetuningeventsrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.ListFineTuningEventsResponse](../../models/operations/listfinetuningeventsresponse.md)>** + + ## listModels Lists the currently available models, and provides basic information about each one such as the owner and availability. 
@@ -910,6 +1056,42 @@ sdk.openAI.listModels().then((res: ListModelsResponse) => { **Promise<[operations.ListModelsResponse](../../models/operations/listmodelsresponse.md)>** +## listPaginatedFineTuningJobs + +List your organization's fine-tuning jobs + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ListPaginatedFineTuningJobsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.listPaginatedFineTuningJobs({ + after: "accusamus", + limit: 414263, +}).then((res: ListPaginatedFineTuningJobsResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| `request` | [operations.ListPaginatedFineTuningJobsRequest](../../models/operations/listpaginatedfinetuningjobsrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.ListPaginatedFineTuningJobsResponse](../../models/operations/listpaginatedfinetuningjobsresponse.md)>** + + ## retrieveFile Returns information about a specific file. 
@@ -923,7 +1105,7 @@ import { RetrieveFileResponse } from "@speakeasy-api/openai/dist/sdk/models/oper const sdk = new Gpt(); sdk.openAI.retrieveFile({ - fileId: "corporis", + fileId: "repudiandae", }).then((res: RetrieveFileResponse) => { if (res.statusCode == 200) { // handle response @@ -944,13 +1126,15 @@ sdk.openAI.retrieveFile({ **Promise<[operations.RetrieveFileResponse](../../models/operations/retrievefileresponse.md)>** -## retrieveFineTune +## ~~retrieveFineTune~~ Gets info about the fine-tune job. -[Learn more about Fine-tuning](/docs/guides/fine-tuning) +[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. + ### Example Usage ```typescript @@ -981,6 +1165,43 @@ sdk.openAI.retrieveFineTune({ **Promise<[operations.RetrieveFineTuneResponse](../../models/operations/retrievefinetuneresponse.md)>** +## retrieveFineTuningJob + +Get info about a fine-tuning job. 
+ +[Learn more about fine-tuning](/docs/guides/fine-tuning) + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { RetrieveFineTuningJobResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; + +const sdk = new Gpt(); + +sdk.openAI.retrieveFineTuningJob({ + fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", +}).then((res: RetrieveFineTuningJobResponse) => { + if (res.statusCode == 200) { + // handle response + } +}); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `request` | [operations.RetrieveFineTuningJobRequest](../../models/operations/retrievefinetuningjobrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.RetrieveFineTuningJobResponse](../../models/operations/retrievefinetuningjobresponse.md)>** + + ## retrieveModel Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
@@ -994,7 +1215,7 @@ import { RetrieveModelResponse } from "@speakeasy-api/openai/dist/sdk/models/ope const sdk = new Gpt(); sdk.openAI.retrieveModel({ - model: "text-davinci-001", + model: "gpt-3.5-turbo", }).then((res: RetrieveModelResponse) => { if (res.statusCode == 200) { // handle response diff --git a/files.gen b/files.gen index 552ece1..adf8e93 100755 --- a/files.gen +++ b/files.gen @@ -20,12 +20,14 @@ src/sdk/types/index.ts src/sdk/types/rfcdate.ts tsconfig.json src/sdk/models/operations/cancelfinetune.ts +src/sdk/models/operations/cancelfinetuningjob.ts src/sdk/models/operations/createchatcompletion.ts src/sdk/models/operations/createcompletion.ts src/sdk/models/operations/createedit.ts src/sdk/models/operations/createembedding.ts src/sdk/models/operations/createfile.ts src/sdk/models/operations/createfinetune.ts +src/sdk/models/operations/createfinetuningjob.ts src/sdk/models/operations/createimage.ts src/sdk/models/operations/createimageedit.ts src/sdk/models/operations/createimagevariation.ts @@ -38,14 +40,18 @@ src/sdk/models/operations/downloadfile.ts src/sdk/models/operations/listfiles.ts src/sdk/models/operations/listfinetuneevents.ts src/sdk/models/operations/listfinetunes.ts +src/sdk/models/operations/listfinetuningevents.ts src/sdk/models/operations/listmodels.ts +src/sdk/models/operations/listpaginatedfinetuningjobs.ts src/sdk/models/operations/retrievefile.ts src/sdk/models/operations/retrievefinetune.ts +src/sdk/models/operations/retrievefinetuningjob.ts src/sdk/models/operations/retrievemodel.ts src/sdk/models/operations/index.ts src/sdk/models/shared/finetune.ts src/sdk/models/shared/openaifile.ts src/sdk/models/shared/finetuneevent.ts +src/sdk/models/shared/finetuningjob.ts src/sdk/models/shared/createchatcompletionresponse.ts src/sdk/models/shared/completionusage.ts src/sdk/models/shared/chatcompletionresponsemessage.ts @@ -62,6 +68,7 @@ src/sdk/models/shared/embedding.ts src/sdk/models/shared/createembeddingrequest.ts 
src/sdk/models/shared/createfilerequest.ts src/sdk/models/shared/createfinetunerequest.ts +src/sdk/models/shared/createfinetuningjobrequest.ts src/sdk/models/shared/imagesresponse.ts src/sdk/models/shared/image.ts src/sdk/models/shared/createimagerequest.ts @@ -78,8 +85,11 @@ src/sdk/models/shared/deletemodelresponse.ts src/sdk/models/shared/listfilesresponse.ts src/sdk/models/shared/listfinetuneeventsresponse.ts src/sdk/models/shared/listfinetunesresponse.ts +src/sdk/models/shared/listfinetuningjobeventsresponse.ts +src/sdk/models/shared/finetuningjobevent.ts src/sdk/models/shared/listmodelsresponse.ts src/sdk/models/shared/model.ts +src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts src/sdk/models/shared/index.ts src/sdk/models/errors/index.ts docs/sdks/gpt/README.md @@ -87,12 +97,15 @@ docs/sdks/openai/README.md USAGE.md docs/models/operations/cancelfinetunerequest.md docs/models/operations/cancelfinetuneresponse.md +docs/models/operations/cancelfinetuningjobrequest.md +docs/models/operations/cancelfinetuningjobresponse.md docs/models/operations/createchatcompletionresponse.md docs/models/operations/createcompletionresponse.md docs/models/operations/createeditresponse.md docs/models/operations/createembeddingresponse.md docs/models/operations/createfileresponse.md docs/models/operations/createfinetuneresponse.md +docs/models/operations/createfinetuningjobresponse.md docs/models/operations/createimageresponse.md docs/models/operations/createimageeditresponse.md docs/models/operations/createimagevariationresponse.md @@ -109,17 +122,26 @@ docs/models/operations/listfilesresponse.md docs/models/operations/listfinetuneeventsrequest.md docs/models/operations/listfinetuneeventsresponse.md docs/models/operations/listfinetunesresponse.md +docs/models/operations/listfinetuningeventsrequest.md +docs/models/operations/listfinetuningeventsresponse.md docs/models/operations/listmodelsresponse.md +docs/models/operations/listpaginatedfinetuningjobsrequest.md 
+docs/models/operations/listpaginatedfinetuningjobsresponse.md docs/models/operations/retrievefilerequest.md docs/models/operations/retrievefileresponse.md docs/models/operations/retrievefinetunerequest.md docs/models/operations/retrievefinetuneresponse.md +docs/models/operations/retrievefinetuningjobrequest.md +docs/models/operations/retrievefinetuningjobresponse.md docs/models/operations/retrievemodelrequest.md docs/models/operations/retrievemodelresponse.md docs/models/shared/finetunehyperparams.md docs/models/shared/finetune.md docs/models/shared/openaifile.md docs/models/shared/finetuneevent.md +docs/models/shared/finetuningjobhyperparametersnepochs1.md +docs/models/shared/finetuningjobhyperparameters.md +docs/models/shared/finetuningjob.md docs/models/shared/createchatcompletionresponsechoicesfinishreason.md docs/models/shared/createchatcompletionresponsechoices.md docs/models/shared/createchatcompletionresponse.md @@ -155,6 +177,10 @@ docs/models/shared/createfilerequestfile.md docs/models/shared/createfilerequest.md docs/models/shared/createfinetunerequestmodel2.md docs/models/shared/createfinetunerequest.md +docs/models/shared/createfinetuningjobrequesthyperparametersnepochs1.md +docs/models/shared/createfinetuningjobrequesthyperparameters.md +docs/models/shared/createfinetuningjobrequestmodel2.md +docs/models/shared/createfinetuningjobrequest.md docs/models/shared/imagesresponse.md docs/models/shared/image.md docs/models/shared/createimagerequestresponseformat.md @@ -189,6 +215,10 @@ docs/models/shared/deletemodelresponse.md docs/models/shared/listfilesresponse.md docs/models/shared/listfinetuneeventsresponse.md docs/models/shared/listfinetunesresponse.md +docs/models/shared/listfinetuningjobeventsresponse.md +docs/models/shared/finetuningjobeventlevel.md +docs/models/shared/finetuningjobevent.md docs/models/shared/listmodelsresponse.md docs/models/shared/model.md +docs/models/shared/listpaginatedfinetuningjobsresponse.md .gitattributes \ No newline at 
end of file diff --git a/gen.yaml b/gen.yaml index 8cd6ccc..5a631db 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: 25bfe8c5a77a1c2cf6a1be5713abc57e + docChecksum: d8bc174529c48277b3d4ce3cf12b17fd docVersion: 2.0.0 - speakeasyVersion: 1.77.0 - generationVersion: 2.91.2 + speakeasyVersion: 1.77.2 + generationVersion: 2.93.0 generation: sdkClassName: gpt sdkFlattening: true @@ -11,11 +11,11 @@ generation: telemetryEnabled: false features: typescript: - core: 2.85.0 + core: 2.85.2 deprecations: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.21.0 + version: 2.21.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index e2b603d..688fdc3 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.21.0", + "version": "2.21.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.21.0", + "version": "2.21.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 0bc3080..561221a 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.21.0", + "version": "2.21.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/operations/cancelfinetuningjob.ts b/src/sdk/models/operations/cancelfinetuningjob.ts new file mode 100755 index 0000000..9aa60f7 --- /dev/null +++ b/src/sdk/models/operations/cancelfinetuningjob.ts @@ -0,0 +1,35 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
+ */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; +import { AxiosResponse } from "axios"; + +export class CancelFineTuningJobRequest extends SpeakeasyBase { + /** + * The ID of the fine-tuning job to cancel + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=fine_tuning_job_id" }) + fineTuningJobId: string; +} + +export class CancelFineTuningJobResponse extends SpeakeasyBase { + @SpeakeasyMetadata() + contentType: string; + + /** + * OK + */ + @SpeakeasyMetadata() + fineTuningJob?: shared.FineTuningJob; + + @SpeakeasyMetadata() + statusCode: number; + + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; +} diff --git a/src/sdk/models/operations/createfinetuningjob.ts b/src/sdk/models/operations/createfinetuningjob.ts new file mode 100755 index 0000000..c2c86ca --- /dev/null +++ b/src/sdk/models/operations/createfinetuningjob.ts @@ -0,0 +1,24 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
+ */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; +import { AxiosResponse } from "axios"; + +export class CreateFineTuningJobResponse extends SpeakeasyBase { + @SpeakeasyMetadata() + contentType: string; + + /** + * OK + */ + @SpeakeasyMetadata() + fineTuningJob?: shared.FineTuningJob; + + @SpeakeasyMetadata() + statusCode: number; + + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; +} diff --git a/src/sdk/models/operations/index.ts b/src/sdk/models/operations/index.ts index e2fa564..5ed440c 100755 --- a/src/sdk/models/operations/index.ts +++ b/src/sdk/models/operations/index.ts @@ -3,12 +3,14 @@ */ export * from "./cancelfinetune"; +export * from "./cancelfinetuningjob"; export * from "./createchatcompletion"; export * from "./createcompletion"; export * from "./createedit"; export * from "./createembedding"; export * from "./createfile"; export * from "./createfinetune"; +export * from "./createfinetuningjob"; export * from "./createimage"; export * from "./createimageedit"; export * from "./createimagevariation"; @@ -21,7 +23,10 @@ export * from "./downloadfile"; export * from "./listfiles"; export * from "./listfinetuneevents"; export * from "./listfinetunes"; +export * from "./listfinetuningevents"; export * from "./listmodels"; +export * from "./listpaginatedfinetuningjobs"; export * from "./retrievefile"; export * from "./retrievefinetune"; +export * from "./retrievefinetuningjob"; export * from "./retrievemodel"; diff --git a/src/sdk/models/operations/listfinetuningevents.ts b/src/sdk/models/operations/listfinetuningevents.ts new file mode 100755 index 0000000..be1812b --- /dev/null +++ b/src/sdk/models/operations/listfinetuningevents.ts @@ -0,0 +1,47 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
+ */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; +import { AxiosResponse } from "axios"; + +export class ListFineTuningEventsRequest extends SpeakeasyBase { + /** + * Identifier for the last event from the previous pagination request. + */ + @SpeakeasyMetadata({ data: "queryParam, style=form;explode=true;name=after" }) + after?: string; + + /** + * The ID of the fine-tuning job to get events for. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=fine_tuning_job_id" }) + fineTuningJobId: string; + + /** + * Number of events to retrieve. + */ + @SpeakeasyMetadata({ data: "queryParam, style=form;explode=true;name=limit" }) + limit?: number; +} + +export class ListFineTuningEventsResponse extends SpeakeasyBase { + @SpeakeasyMetadata() + contentType: string; + + /** + * OK + */ + @SpeakeasyMetadata() + listFineTuningJobEventsResponse?: shared.ListFineTuningJobEventsResponse; + + @SpeakeasyMetadata() + statusCode: number; + + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; +} diff --git a/src/sdk/models/operations/listpaginatedfinetuningjobs.ts b/src/sdk/models/operations/listpaginatedfinetuningjobs.ts new file mode 100755 index 0000000..66b9cdc --- /dev/null +++ b/src/sdk/models/operations/listpaginatedfinetuningjobs.ts @@ -0,0 +1,38 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; +import { AxiosResponse } from "axios"; + +export class ListPaginatedFineTuningJobsRequest extends SpeakeasyBase { + /** + * Identifier for the last job from the previous pagination request. + */ + @SpeakeasyMetadata({ data: "queryParam, style=form;explode=true;name=after" }) + after?: string; + + /** + * Number of fine-tuning jobs to retrieve. 
+ */ + @SpeakeasyMetadata({ data: "queryParam, style=form;explode=true;name=limit" }) + limit?: number; +} + +export class ListPaginatedFineTuningJobsResponse extends SpeakeasyBase { + @SpeakeasyMetadata() + contentType: string; + + /** + * OK + */ + @SpeakeasyMetadata() + listPaginatedFineTuningJobsResponse?: shared.ListPaginatedFineTuningJobsResponse; + + @SpeakeasyMetadata() + statusCode: number; + + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; +} diff --git a/src/sdk/models/operations/retrievefinetuningjob.ts b/src/sdk/models/operations/retrievefinetuningjob.ts new file mode 100755 index 0000000..917a8a2 --- /dev/null +++ b/src/sdk/models/operations/retrievefinetuningjob.ts @@ -0,0 +1,35 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import * as shared from "../shared"; +import { AxiosResponse } from "axios"; + +export class RetrieveFineTuningJobRequest extends SpeakeasyBase { + /** + * The ID of the fine-tuning job + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=fine_tuning_job_id" }) + fineTuningJobId: string; +} + +export class RetrieveFineTuningJobResponse extends SpeakeasyBase { + @SpeakeasyMetadata() + contentType: string; + + /** + * OK + */ + @SpeakeasyMetadata() + fineTuningJob?: shared.FineTuningJob; + + @SpeakeasyMetadata() + statusCode: number; + + @SpeakeasyMetadata() + rawResponse?: AxiosResponse; +} diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts index 2f03a44..7430b81 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -38,7 +38,7 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { * * @remarks * - * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + * [See 
more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) * */ @SpeakeasyMetadata() @@ -111,7 +111,7 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { * * @remarks * - * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) * */ @SpeakeasyMetadata() diff --git a/src/sdk/models/shared/createchatcompletionresponse.ts b/src/sdk/models/shared/createchatcompletionresponse.ts index 74e6155..41f0aa9 100755 --- a/src/sdk/models/shared/createchatcompletionresponse.ts +++ b/src/sdk/models/shared/createchatcompletionresponse.ts @@ -61,7 +61,7 @@ export class CreateChatCompletionResponse extends SpeakeasyBase { choices: CreateChatCompletionResponseChoices[]; /** - * A unix timestamp of when the chat completion was created. + * The Unix timestamp (in seconds) of when the chat completion was created. 
*/ @SpeakeasyMetadata() @Expose({ name: "created" }) diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts index 6cefcc7..8fc4c57 100755 --- a/src/sdk/models/shared/createcompletionrequest.ts +++ b/src/sdk/models/shared/createcompletionrequest.ts @@ -12,6 +12,8 @@ import { Expose } from "class-transformer"; * */ export enum CreateCompletionRequestModel2 { + Babbage002 = "babbage-002", + Davinci002 = "davinci-002", TextDavinci003 = "text-davinci-003", TextDavinci002 = "text-davinci-002", TextDavinci001 = "text-davinci-001", @@ -51,7 +53,7 @@ export class CreateCompletionRequest extends SpeakeasyBase { * * @remarks * - * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) * */ @SpeakeasyMetadata() @@ -123,7 +125,7 @@ export class CreateCompletionRequest extends SpeakeasyBase { * * @remarks * - * [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) + * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) * */ @SpeakeasyMetadata() diff --git a/src/sdk/models/shared/createcompletionresponse.ts b/src/sdk/models/shared/createcompletionresponse.ts index 1e9bf88..5e7e50f 100755 --- a/src/sdk/models/shared/createcompletionresponse.ts +++ b/src/sdk/models/shared/createcompletionresponse.ts @@ -78,7 +78,7 @@ export class CreateCompletionResponse extends SpeakeasyBase { choices: CreateCompletionResponseChoices[]; /** - * The Unix timestamp of when the completion was created. + * The Unix timestamp (in seconds) of when the completion was created. 
*/ @SpeakeasyMetadata() @Expose({ name: "created" }) diff --git a/src/sdk/models/shared/createeditresponse.ts b/src/sdk/models/shared/createeditresponse.ts index 5e9ee99..10c1504 100755 --- a/src/sdk/models/shared/createeditresponse.ts +++ b/src/sdk/models/shared/createeditresponse.ts @@ -47,6 +47,8 @@ export class CreateEditResponseChoices extends SpeakeasyBase { /** * OK + * + * @deprecated class: This will be removed in a future release, please migrate away from it as soon as possible. */ export class CreateEditResponse extends SpeakeasyBase { /** @@ -58,7 +60,7 @@ export class CreateEditResponse extends SpeakeasyBase { choices: CreateEditResponseChoices[]; /** - * A unix timestamp of when the edit was created. + * The Unix timestamp (in seconds) of when the edit was created. */ @SpeakeasyMetadata() @Expose({ name: "created" }) diff --git a/src/sdk/models/shared/createfilerequest.ts b/src/sdk/models/shared/createfilerequest.ts index 724512c..f4f411e 100755 --- a/src/sdk/models/shared/createfilerequest.ts +++ b/src/sdk/models/shared/createfilerequest.ts @@ -18,7 +18,7 @@ export class CreateFileRequest extends SpeakeasyBase { * * @remarks * - * If the `purpose` is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data). + * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. * */ @SpeakeasyMetadata({ data: "multipart_form, file=true" }) @@ -29,7 +29,7 @@ export class CreateFileRequest extends SpeakeasyBase { * * @remarks * - * Use "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file. + * Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file. 
* */ @SpeakeasyMetadata({ data: "multipart_form, name=purpose" }) diff --git a/src/sdk/models/shared/createfinetunerequest.ts b/src/sdk/models/shared/createfinetunerequest.ts index 6414ac8..0ede59f 100755 --- a/src/sdk/models/shared/createfinetunerequest.ts +++ b/src/sdk/models/shared/createfinetunerequest.ts @@ -9,9 +9,9 @@ import { Expose } from "class-transformer"; * The name of the base model to fine-tune. You can select one of "ada", * * @remarks - * "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. + * "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. * To learn more about these models, see the - * [Models](https://platform.openai.com/docs/models) documentation. + * [Models](/docs/models) documentation. * */ export enum CreateFineTuneRequestModel2 { @@ -85,7 +85,7 @@ export class CreateFineTuneRequest extends SpeakeasyBase { * * @remarks * and F-1 score using the validation set at the end of every epoch. - * These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). + * These metrics can be viewed in the [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). * * In order to compute classification metrics, you must provide a * `validation_file`. Additionally, you must @@ -119,9 +119,9 @@ export class CreateFineTuneRequest extends SpeakeasyBase { * The name of the base model to fine-tune. You can select one of "ada", * * @remarks - * "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. + * "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. * To learn more about these models, see the - * [Models](https://platform.openai.com/docs/models) documentation. + * [Models](/docs/models) documentation. 
* */ @SpeakeasyMetadata() @@ -179,7 +179,7 @@ export class CreateFineTuneRequest extends SpeakeasyBase { * example is a JSON object with the keys "prompt" and "completion". * Additionally, you must upload your file with the purpose `fine-tune`. * - * See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. + * See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. * */ @SpeakeasyMetadata() @@ -193,14 +193,14 @@ export class CreateFineTuneRequest extends SpeakeasyBase { * * If you provide this file, the data is used to generate validation * metrics periodically during fine-tuning. These metrics can be viewed in - * the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). + * the [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). * Your train and validation data should be mutually exclusive. * * Your dataset must be formatted as a JSONL file, where each validation * example is a JSON object with the keys "prompt" and "completion". * Additionally, you must upload your file with the purpose `fine-tune`. * - * See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details. + * See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. * */ @SpeakeasyMetadata() diff --git a/src/sdk/models/shared/createfinetuningjobrequest.ts b/src/sdk/models/shared/createfinetuningjobrequest.ts new file mode 100755 index 0000000..ab4501a --- /dev/null +++ b/src/sdk/models/shared/createfinetuningjobrequest.ts @@ -0,0 +1,114 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose, Type } from "class-transformer"; + +/** + * The number of epochs to train the model for. 
An epoch refers to one + * + * @remarks + * full cycle through the training dataset. + * + */ +export enum CreateFineTuningJobRequestHyperparametersNEpochs1 { + Auto = "auto", +} + +/** + * The hyperparameters used for the fine-tuning job. + */ +export class CreateFineTuningJobRequestHyperparameters extends SpeakeasyBase { + /** + * The number of epochs to train the model for. An epoch refers to one + * + * @remarks + * full cycle through the training dataset. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "n_epochs" }) + nEpochs?: any; +} + +/** + * The name of the model to fine-tune. You can select one of the + * + * @remarks + * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + * + */ +export enum CreateFineTuningJobRequestModel2 { + Babbage002 = "babbage-002", + Davinci002 = "davinci-002", + Gpt35Turbo = "gpt-3.5-turbo", +} + +export class CreateFineTuningJobRequest extends SpeakeasyBase { + /** + * The hyperparameters used for the fine-tuning job. + */ + @SpeakeasyMetadata() + @Expose({ name: "hyperparameters" }) + @Type(() => CreateFineTuningJobRequestHyperparameters) + hyperparameters?: CreateFineTuningJobRequestHyperparameters; + + /** + * The name of the model to fine-tune. You can select one of the + * + * @remarks + * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + * + */ + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: any; + + /** + * A string of up to 40 characters that will be added to your fine-tuned model name. + * + * @remarks + * + * For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "suffix" }) + suffix?: string; + + /** + * The ID of an uploaded file that contains training data. + * + * @remarks + * + * See [upload file](/docs/api-reference/files/upload) for how to upload a file. + * + * Your dataset must be formatted as a JSONL file. 
Additionally, you must upload your file with the purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "training_file" }) + trainingFile: string; + + /** + * The ID of an uploaded file that contains validation data. + * + * @remarks + * + * If you provide this file, the data is used to generate validation + * metrics periodically during fine-tuning. These metrics can be viewed in + * the fine-tuning results file. + * The same data should not be present in both train and validation files. + * + * Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + * + * See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "validation_file" }) + validationFile?: string; +} diff --git a/src/sdk/models/shared/finetune.ts b/src/sdk/models/shared/finetune.ts index 08b6bc2..a5c3249 100755 --- a/src/sdk/models/shared/finetune.ts +++ b/src/sdk/models/shared/finetune.ts @@ -8,7 +8,7 @@ import { OpenAIFile } from "./openaifile"; import { Expose, Type } from "class-transformer"; /** - * The hyperparameters used for the fine-tuning job. See the [Fine-tuning Guide](/docs/guides/fine-tuning/hyperparameters) for more details. + * The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. */ export class FineTuneHyperparams extends SpeakeasyBase { /** @@ -85,14 +85,16 @@ export class FineTuneHyperparams extends SpeakeasyBase { } /** - * The `FineTune` object represents a fine-tuning job that has been created through the API. + * The `FineTune` object represents a legacy fine-tune job that has been created through the API. * * @remarks * + * + * @deprecated class: This will be removed in a future release, please migrate away from it as soon as possible. 
*/ export class FineTune extends SpeakeasyBase { /** - * The unix timestamp for when the fine-tuning job was created. + * The Unix timestamp (in seconds) for when the fine-tuning job was created. */ @SpeakeasyMetadata() @Expose({ name: "created_at" }) @@ -114,7 +116,7 @@ export class FineTune extends SpeakeasyBase { fineTunedModel: string; /** - * The hyperparameters used for the fine-tuning job. See the [Fine-tuning Guide](/docs/guides/fine-tuning/hyperparameters) for more details. + * The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. */ @SpeakeasyMetadata() @Expose({ name: "hyperparams" }) @@ -158,7 +160,7 @@ export class FineTune extends SpeakeasyBase { resultFiles: OpenAIFile[]; /** - * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. + * The current status of the fine-tuning job, which can be either `created`, `running`, `succeeded`, `failed`, or `cancelled`. */ @SpeakeasyMetadata() @Expose({ name: "status" }) @@ -173,7 +175,7 @@ export class FineTune extends SpeakeasyBase { trainingFiles: OpenAIFile[]; /** - * The unix timestamp for when the fine-tuning job was last updated. + * The Unix timestamp (in seconds) for when the fine-tuning job was last updated. */ @SpeakeasyMetadata() @Expose({ name: "updated_at" }) diff --git a/src/sdk/models/shared/finetuningjob.ts b/src/sdk/models/shared/finetuningjob.ts new file mode 100755 index 0000000..fe96192 --- /dev/null +++ b/src/sdk/models/shared/finetuningjob.ts @@ -0,0 +1,133 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { OpenAIFile } from "./openaifile"; +import { Expose, Type } from "class-transformer"; + +/** + * The number of epochs to train the model for. 
An epoch refers to one full cycle through the training dataset. + * + * @remarks + * "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + */ +export enum FineTuningJobHyperparametersNEpochs1 { + Auto = "auto", +} + +/** + * The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ +export class FineTuningJobHyperparameters extends SpeakeasyBase { + /** + * The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + * + * @remarks + * "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + */ + @SpeakeasyMetadata() + @Expose({ name: "n_epochs" }) + nEpochs?: any; +} + +/** + * The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. + * + * @remarks + * + */ +export class FineTuningJob extends SpeakeasyBase { + /** + * The Unix timestamp (in seconds) for when the fine-tuning job was created. + */ + @SpeakeasyMetadata() + @Expose({ name: "created_at" }) + createdAt: number; + + /** + * The name of the fine-tuned model that is being created. + */ + @SpeakeasyMetadata() + @Expose({ name: "fine_tuned_model" }) + fineTunedModel: string; + + /** + * The Unix timestamp (in seconds) for when the fine-tuning job was finished. + */ + @SpeakeasyMetadata() + @Expose({ name: "finished_at" }) + finishedAt?: number; + + /** + * The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + */ + @SpeakeasyMetadata() + @Expose({ name: "hyperparameters" }) + @Type(() => FineTuningJobHyperparameters) + hyperparameters: FineTuningJobHyperparameters; + + /** + * The object identifier, which can be referenced in the API endpoints. 
+ */ + @SpeakeasyMetadata() + @Expose({ name: "id" }) + id: string; + + /** + * The base model that is being fine-tuned. + */ + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: string; + + /** + * The object type, which is always "fine_tuning.job". + */ + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; + + /** + * The organization that owns the fine-tuning job. + */ + @SpeakeasyMetadata() + @Expose({ name: "organization_id" }) + organizationId: string; + + /** + * The compiled results files for the fine-tuning job. + */ + @SpeakeasyMetadata({ elemType: OpenAIFile }) + @Expose({ name: "result_files" }) + @Type(() => OpenAIFile) + resultFiles: OpenAIFile[]; + + /** + * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. + */ + @SpeakeasyMetadata() + @Expose({ name: "status" }) + status: string; + + /** + * The total number of billable tokens processed by this fine tuning job. + */ + @SpeakeasyMetadata() + @Expose({ name: "trained_tokens" }) + trainedTokens: number; + + /** + * The file ID used for training. + */ + @SpeakeasyMetadata() + @Expose({ name: "training_file" }) + trainingFile: string; + + /** + * The file ID used for validation. + */ + @SpeakeasyMetadata() + @Expose({ name: "validation_file" }) + validationFile: string; +} diff --git a/src/sdk/models/shared/finetuningjobevent.ts b/src/sdk/models/shared/finetuningjobevent.ts new file mode 100755 index 0000000..c0f8a76 --- /dev/null +++ b/src/sdk/models/shared/finetuningjobevent.ts @@ -0,0 +1,30 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
+ */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose } from "class-transformer"; + +export enum FineTuningJobEventLevel { + Info = "info", + Warn = "warn", + Error = "error", +} + +export class FineTuningJobEvent extends SpeakeasyBase { + @SpeakeasyMetadata() + @Expose({ name: "created_at" }) + createdAt: number; + + @SpeakeasyMetadata() + @Expose({ name: "level" }) + level: FineTuningJobEventLevel; + + @SpeakeasyMetadata() + @Expose({ name: "message" }) + message: string; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; +} diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index c20e423..4ea6256 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -17,6 +17,7 @@ export * from "./createembeddingrequest"; export * from "./createembeddingresponse"; export * from "./createfilerequest"; export * from "./createfinetunerequest"; +export * from "./createfinetuningjobrequest"; export * from "./createimageeditrequest2"; export * from "./createimagerequest"; export * from "./createimagevariationrequest2"; @@ -31,11 +32,15 @@ export * from "./deletemodelresponse"; export * from "./embedding"; export * from "./finetune"; export * from "./finetuneevent"; +export * from "./finetuningjob"; +export * from "./finetuningjobevent"; export * from "./image"; export * from "./imagesresponse"; export * from "./listfilesresponse"; export * from "./listfinetuneeventsresponse"; export * from "./listfinetunesresponse"; +export * from "./listfinetuningjobeventsresponse"; export * from "./listmodelsresponse"; +export * from "./listpaginatedfinetuningjobsresponse"; export * from "./model"; export * from "./openaifile"; diff --git a/src/sdk/models/shared/listfinetuningjobeventsresponse.ts b/src/sdk/models/shared/listfinetuningjobeventsresponse.ts new file mode 100755 index 0000000..70be312 --- /dev/null +++ b/src/sdk/models/shared/listfinetuningjobeventsresponse.ts @@ 
-0,0 +1,21 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { FineTuningJobEvent } from "./finetuningjobevent"; +import { Expose, Type } from "class-transformer"; + +/** + * OK + */ +export class ListFineTuningJobEventsResponse extends SpeakeasyBase { + @SpeakeasyMetadata({ elemType: FineTuningJobEvent }) + @Expose({ name: "data" }) + @Type(() => FineTuningJobEvent) + data: FineTuningJobEvent[]; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; +} diff --git a/src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts b/src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts new file mode 100755 index 0000000..4068e97 --- /dev/null +++ b/src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts @@ -0,0 +1,25 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { FineTuningJob } from "./finetuningjob"; +import { Expose, Type } from "class-transformer"; + +/** + * OK + */ +export class ListPaginatedFineTuningJobsResponse extends SpeakeasyBase { + @SpeakeasyMetadata({ elemType: FineTuningJob }) + @Expose({ name: "data" }) + @Type(() => FineTuningJob) + data: FineTuningJob[]; + + @SpeakeasyMetadata() + @Expose({ name: "has_more" }) + hasMore: boolean; + + @SpeakeasyMetadata() + @Expose({ name: "object" }) + object: string; +} diff --git a/src/sdk/models/shared/model.ts b/src/sdk/models/shared/model.ts index 7313588..32b8668 100755 --- a/src/sdk/models/shared/model.ts +++ b/src/sdk/models/shared/model.ts @@ -10,7 +10,7 @@ import { Expose } from "class-transformer"; */ export class Model extends SpeakeasyBase { /** - * The date and time when the model was created. + * The Unix timestamp (in seconds) when the model was created. 
*/ @SpeakeasyMetadata() @Expose({ name: "created" }) diff --git a/src/sdk/models/shared/openaifile.ts b/src/sdk/models/shared/openaifile.ts index 423f53c..32bd435 100755 --- a/src/sdk/models/shared/openaifile.ts +++ b/src/sdk/models/shared/openaifile.ts @@ -20,7 +20,7 @@ export class OpenAIFile extends SpeakeasyBase { bytes: number; /** - * The unix timestamp for when the file was created. + * The Unix timestamp (in seconds) for when the file was created. */ @SpeakeasyMetadata() @Expose({ name: "created_at" }) diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index 0797288..a0382b8 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -23,6 +23,8 @@ export class OpenAI { /** * Immediately cancel a fine-tune job. * + * + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. */ async cancelFineTune( req: operations.CancelFineTuneRequest, @@ -86,6 +88,80 @@ export class OpenAI { return res; } + /** + * Immediately cancel a fine-tune job. 
+ * + */ + async cancelFineTuningJob( + req: operations.CancelFineTuningJobRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.CancelFineTuningJobRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL( + baseURL, + "/fine_tuning/jobs/{fine_tuning_job_id}/cancel", + req + ); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CancelFineTuningJobResponse = + new operations.CancelFineTuningJobResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTuningJob = utils.objectToClass( + JSON.parse(decodedRes), + shared.FineTuningJob + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + /** * Creates a model response for the given chat conversation. 
*/ @@ -497,8 +573,10 @@ export class OpenAI { * * Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. * - * [Learn more about Fine-tuning](/docs/guides/fine-tuning) + * [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + * * + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. */ async createFineTune( req: shared.CreateFineTuneRequest, @@ -575,6 +653,93 @@ export class OpenAI { return res; } + /** + * Creates a job that fine-tunes a specified model from a given dataset. + * + * Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + * + * [Learn more about fine-tuning](/docs/guides/fine-tuning) + * + */ + async createFineTuningJob( + req: shared.CreateFineTuningJobRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateFineTuningJobRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/fine_tuning/jobs"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...reqBodyHeaders, ...config?.headers }; + if (reqBody == null || Object.keys(reqBody).length === 0) + throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} 
${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateFineTuningJobResponse = + new operations.CreateFineTuningJobResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTuningJob = utils.objectToClass( + JSON.parse(decodedRes), + shared.FineTuningJob + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + /** * Creates an image given a prompt. */ @@ -1132,7 +1297,7 @@ export class OpenAI { } /** - * Delete a fine-tuned model. You must have the Owner role in your organization. + * Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. */ async deleteModel( req: operations.DeleteModelRequest, @@ -1328,6 +1493,8 @@ export class OpenAI { /** * Get fine-grained status updates for a fine-tune job. * + * + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. */ async listFineTuneEvents( req: operations.ListFineTuneEventsRequest, @@ -1399,6 +1566,8 @@ export class OpenAI { /** * List your organization's fine-tuning jobs * + * + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. 
*/ async listFineTunes(config?: AxiosRequestConfig): Promise { const baseURL: string = utils.templateUrl( @@ -1458,6 +1627,81 @@ export class OpenAI { return res; } + /** + * Get status updates for a fine-tuning job. + * + */ + async listFineTuningEvents( + req: operations.ListFineTuningEventsRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.ListFineTuningEventsRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL( + baseURL, + "/fine_tuning/jobs/{fine_tuning_job_id}/events", + req + ); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + const queryParams: string = utils.serializeQueryParams(req); + headers["Accept"] = "application/json"; + + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url + queryParams, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListFineTuningEventsResponse = + new operations.ListFineTuningEventsResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listFineTuningJobEventsResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ListFineTuningJobEventsResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + /** * Lists the currently available models, and provides basic information about each one such as the owner and availability. */ @@ -1519,6 +1763,77 @@ export class OpenAI { return res; } + /** + * List your organization's fine-tuning jobs + * + */ + async listPaginatedFineTuningJobs( + req: operations.ListPaginatedFineTuningJobsRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.ListPaginatedFineTuningJobsRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/fine_tuning/jobs"; + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + const queryParams: string = utils.serializeQueryParams(req); + headers["Accept"] = "application/json"; + + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + 
url: url + queryParams, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListPaginatedFineTuningJobsResponse = + new operations.ListPaginatedFineTuningJobsResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listPaginatedFineTuningJobsResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ListPaginatedFineTuningJobsResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + /** * Returns information about a specific file. */ @@ -1587,8 +1902,10 @@ export class OpenAI { /** * Gets info about the fine-tune job. * - * [Learn more about Fine-tuning](/docs/guides/fine-tuning) + * [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) * + * + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. */ async retrieveFineTune( req: operations.RetrieveFineTuneRequest, @@ -1652,6 +1969,82 @@ export class OpenAI { return res; } + /** + * Get info about a fine-tuning job. 
+ * + * [Learn more about fine-tuning](/docs/guides/fine-tuning) + * + */ + async retrieveFineTuningJob( + req: operations.RetrieveFineTuningJobRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.RetrieveFineTuningJobRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL( + baseURL, + "/fine_tuning/jobs/{fine_tuning_job_id}", + req + ); + + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + + const headers = { ...config?.headers }; + headers["Accept"] = "application/json"; + + headers[ + "user-agent" + ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.RetrieveFineTuningJobResponse = + new operations.RetrieveFineTuningJobResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTuningJob = utils.objectToClass( + JSON.parse(decodedRes), + shared.FineTuningJob + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + /** * Retrieves a model instance, providing basic information about the model such as the owner and permissioning. */ diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 64cd83a..7dfab19 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,8 +38,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.21.0"; - genVersion = "2.91.2"; + sdkVersion = "2.21.1"; + genVersion = "2.93.0"; public constructor(init?: Partial) { Object.assign(this, init); From e83b3064f38af36645b61f44b8303b118a9c1750 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sun, 3 Sep 2023 00:57:10 +0000 Subject: [PATCH 45/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.77.2 --- RELEASES.md | 12 +++++++++++- docs/models/shared/finetuningjobevent.md | 1 + gen.yaml | 4 ++-- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/models/shared/finetuningjobevent.ts | 4 ++++ src/sdk/sdk.ts | 2 +- 7 files changed, 22 insertions(+), 7 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 007921f..c2621fb 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -520,4 +520,14 @@ Based on: ### Generated - [typescript v2.21.1] . 
### Releases -- [NPM v2.21.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.21.1 - . \ No newline at end of file +- [NPM v2.21.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.21.1 - . + +## 2023-09-03 00:56:42 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.77.2 (2.93.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.21.2] . +### Releases +- [NPM v2.21.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.21.2 - . \ No newline at end of file diff --git a/docs/models/shared/finetuningjobevent.md b/docs/models/shared/finetuningjobevent.md index d899f9b..096aed7 100755 --- a/docs/models/shared/finetuningjobevent.md +++ b/docs/models/shared/finetuningjobevent.md @@ -6,6 +6,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | | `createdAt` | *number* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_check_mark: | N/A | | `level` | [FineTuningJobEventLevel](../../models/shared/finetuningjobeventlevel.md) | :heavy_check_mark: | N/A | | `message` | *string* | :heavy_check_mark: | N/A | | `object` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 5a631db..9f64aa0 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,6 +1,6 @@ configVersion: 1.0.0 management: - docChecksum: d8bc174529c48277b3d4ce3cf12b17fd + docChecksum: b9d254ee51060898a4e93f4eea8fcef1 docVersion: 2.0.0 speakeasyVersion: 1.77.2 generationVersion: 2.93.0 @@ -15,7 +15,7 @@ features: deprecations: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.21.1 + version: 2.21.2 author: speakeasy-openai maxMethodParams: 0 
packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 688fdc3..93fe57b 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.21.1", + "version": "2.21.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.21.1", + "version": "2.21.2", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 561221a..d6a96c3 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.21.1", + "version": "2.21.2", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/finetuningjobevent.ts b/src/sdk/models/shared/finetuningjobevent.ts index c0f8a76..43f4a41 100755 --- a/src/sdk/models/shared/finetuningjobevent.ts +++ b/src/sdk/models/shared/finetuningjobevent.ts @@ -16,6 +16,10 @@ export class FineTuningJobEvent extends SpeakeasyBase { @Expose({ name: "created_at" }) createdAt: number; + @SpeakeasyMetadata() + @Expose({ name: "id" }) + id: string; + @SpeakeasyMetadata() @Expose({ name: "level" }) level: FineTuningJobEventLevel; diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 7dfab19..37766ca 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -38,7 +38,7 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.21.1"; + sdkVersion = "2.21.2"; genVersion = "2.93.0"; public constructor(init?: Partial) { From 2a6dd48fe04bda195b745326657d7c7831558913 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 5 Sep 2023 00:53:53 +0000 Subject: [PATCH 46/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.78.3 --- RELEASES.md | 12 ++++- gen.yaml | 8 +-- package-lock.json | 4 +- package.json | 2 +- src/internal/utils/requestbody.ts | 6 +-- src/internal/utils/security.ts | 83 
+++++++++++++++---------------- src/sdk/openai.ts | 56 --------------------- src/sdk/sdk.ts | 8 +-- 8 files changed, 62 insertions(+), 117 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index c2621fb..71aba60 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -530,4 +530,14 @@ Based on: ### Generated - [typescript v2.21.2] . ### Releases -- [NPM v2.21.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.21.2 - . \ No newline at end of file +- [NPM v2.21.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.21.2 - . + +## 2023-09-05 00:53:17 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.78.3 (2.96.3) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.22.0] . +### Releases +- [NPM v2.22.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.0 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 9f64aa0..afdc3c7 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: b9d254ee51060898a4e93f4eea8fcef1 docVersion: 2.0.0 - speakeasyVersion: 1.77.2 - generationVersion: 2.93.0 + speakeasyVersion: 1.78.3 + generationVersion: 2.96.3 generation: sdkClassName: gpt sdkFlattening: true @@ -11,11 +11,11 @@ generation: telemetryEnabled: false features: typescript: - core: 2.85.2 + core: 2.87.0 deprecations: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.21.2 + version: 2.22.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 93fe57b..1dbc436 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.21.2", + "version": "2.22.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.21.2", + "version": "2.22.0", "dependencies": { "axios": "^1.1.3", "class-transformer": 
"^0.5.1", diff --git a/package.json b/package.json index d6a96c3..48a931b 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.21.2", + "version": "2.22.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/internal/utils/requestbody.ts b/src/internal/utils/requestbody.ts index 734969f..f8f4691 100755 --- a/src/internal/utils/requestbody.ts +++ b/src/internal/utils/requestbody.ts @@ -15,7 +15,7 @@ export function serializeRequestBody( request: any, requestFieldName: string, serializationMethod: string -): [object, any] { +): [Record, any] { if ( request !== Object(request) || !request.hasOwnProperty(requestFieldName) @@ -46,8 +46,8 @@ export function serializeRequestBody( const serializeContentType = ( contentType: string, reqBody: any -): [object, any] => { - let [requestHeaders, requestBody]: [object, any] = [{}, {}]; +): [Record, any] => { + let [requestHeaders, requestBody]: [Record, any] = [{}, {}]; switch (contentType) { case "multipart/form-data": diff --git a/src/internal/utils/security.ts b/src/internal/utils/security.ts index d8939d6..8f183b4 100755 --- a/src/internal/utils/security.ts +++ b/src/internal/utils/security.ts @@ -2,15 +2,17 @@ * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
*/ -import { AxiosInstance } from "axios"; - const securityMetadataKey = "security"; -export function createSecurityClient( - client: AxiosInstance, +export type SecurityProperties = { + params: Record, + headers: Record, +} + +export function parseSecurityProperties( security: any -): AxiosInstance { - return parseSecurityClass(client, security); +): SecurityProperties { + return parseSecurityClass(security); } function parseSecurityDecorator(securityAnn: string): SecurityDecorator { @@ -51,10 +53,13 @@ function parseSecurityDecorator(securityAnn: string): SecurityDecorator { } function parseSecurityClass( - client: AxiosInstance, security: any -): AxiosInstance { +): SecurityProperties { const fieldNames: string[] = Object.getOwnPropertyNames(security); + const properties: SecurityProperties = { + params: {}, + headers: {}, + } fieldNames.forEach((fname) => { const securityAnn: string = Reflect.getMetadata( securityMetadataKey, @@ -69,23 +74,23 @@ function parseSecurityClass( const value = security[fname]; if (securityDecorator.Option) { - return parseSecurityOption(client, value); + return parseSecurityOption(properties, value); } else if (securityDecorator.Scheme) { if (securityDecorator.SubType === "basic" && value !== Object(value)) { - return parseSecurityScheme(client, securityDecorator, security); + return parseSecurityScheme(properties, securityDecorator, security); } else { - client = parseSecurityScheme(client, securityDecorator, value); + return parseSecurityScheme(properties, securityDecorator, value); } } }); - return client; + return properties; } function parseSecurityOption( - client: AxiosInstance, + properties: SecurityProperties, optionType: any -): AxiosInstance { +): void { const fieldNames: string[] = Object.getOwnPropertyNames(optionType); fieldNames.forEach((fname) => { const securityAnn: string = Reflect.getMetadata( @@ -97,23 +102,21 @@ function parseSecurityOption( const securityDecorator: SecurityDecorator = 
parseSecurityDecorator(securityAnn); if (securityDecorator == null || !securityDecorator.Scheme) return; - return parseSecurityScheme(client, securityDecorator, optionType[fname]); + return parseSecurityScheme(properties, securityDecorator, optionType[fname]); }); - - return client; } function parseSecurityScheme( - client: AxiosInstance, + properties: SecurityProperties, schemeDecorator: SecurityDecorator, scheme: any -): AxiosInstance { +): void { if (scheme === Object(scheme)) { if ( schemeDecorator.Type === "http" && schemeDecorator.SubType === "basic" ) { - return parseBasicAuthScheme(client, scheme); + return parseBasicAuthScheme(properties, scheme); } const fieldNames: string[] = Object.getOwnPropertyNames(scheme); @@ -128,46 +131,44 @@ function parseSecurityScheme( parseSecurityDecorator(securityAnn); if (securityDecorator == null || securityDecorator.Name === "") return; - client = parseSecuritySchemeValue( - client, + return parseSecuritySchemeValue( + properties, schemeDecorator, securityDecorator, scheme[fname] ); }); } else { - client = parseSecuritySchemeValue( - client, + return parseSecuritySchemeValue( + properties, schemeDecorator, schemeDecorator, scheme ); } - - return client; } function parseSecuritySchemeValue( - client: AxiosInstance, + properties: SecurityProperties, schemeDecorator: SecurityDecorator, securityDecorator: SecurityDecorator, value: any -): AxiosInstance { +): void { switch (schemeDecorator.Type) { case "apiKey": switch (schemeDecorator.SubType) { case "header": - client.defaults.headers.common[securityDecorator.Name] = value; + properties.headers[securityDecorator.Name] = value; break; case "query": - client.defaults.params[securityDecorator.Name] = value; + properties.params[securityDecorator.Name] = value; break; case "cookie": { const securityDecoratorName: string = securityDecorator.Name; const val: string = value; - client.defaults.headers.common[ + properties.headers[ "Cookie" - ] = `${securityDecoratorName}=${val}`; + ] 
= `${securityDecoratorName}=${val}`; break; } default: @@ -175,17 +176,17 @@ function parseSecuritySchemeValue( } break; case "openIdConnect": - client.defaults.headers.common[securityDecorator.Name] = value; + properties.headers[securityDecorator.Name] = value; break; case "oauth2": - client.defaults.headers.common[securityDecorator.Name] = value; + properties.headers[securityDecorator.Name] = value; break; case "http": switch (schemeDecorator.SubType) { case "basic": break; case "bearer": - client.defaults.headers.common[securityDecorator.Name] = value.toLowerCase().startsWith("bearer ") ? value : `Bearer ${value}`; + properties.headers[securityDecorator.Name] = value.toLowerCase().startsWith("bearer ") ? value : `Bearer ${value}`; break; default: throw new Error("not supported"); @@ -194,14 +195,12 @@ function parseSecuritySchemeValue( default: throw new Error("not supported"); } - - return client; } function parseBasicAuthScheme( - client: AxiosInstance, + properties: SecurityProperties, scheme: any -): AxiosInstance { +): void { let username, password = ""; @@ -227,11 +226,7 @@ function parseBasicAuthScheme( } }); - client.defaults.headers.common["Authorization"] = `Basic ${Buffer.from( - `${username}:${password}` - ).toString("base64")}`; - - return client; + properties.headers["Authorization"] = `Basic ${Buffer.from(`${username}:${password}`).toString('base64')}`; } class SecurityDecorator { diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index a0382b8..62875e0 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -39,9 +39,7 @@ export class OpenAI { this.sdkConfiguration.serverDefaults ); const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}/cancel", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; @@ -109,9 +107,7 @@ export class OpenAI { "/fine_tuning/jobs/{fine_tuning_job_id}/cancel", req ); - const client: AxiosInstance 
= this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; @@ -188,9 +184,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -270,9 +264,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -353,9 +345,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -434,9 +424,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -516,9 +504,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -601,9 +587,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - 
const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -684,9 +668,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -766,9 +748,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -847,9 +827,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -928,9 +906,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -1010,9 +986,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -1091,9 
+1065,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -1173,9 +1145,7 @@ export class OpenAI { throw new Error(`Error serializing request body, cause: ${e.message}`); } } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); @@ -1244,9 +1214,7 @@ export class OpenAI { this.sdkConfiguration.serverDefaults ); const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; @@ -1312,9 +1280,7 @@ export class OpenAI { this.sdkConfiguration.serverDefaults ); const url: string = utils.generateURL(baseURL, "/models/{model}", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; @@ -1380,9 +1346,7 @@ export class OpenAI { this.sdkConfiguration.serverDefaults ); const url: string = utils.generateURL(baseURL, "/files/{file_id}/content", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; @@ -1438,9 +1402,7 @@ export class OpenAI { this.sdkConfiguration.serverDefaults ); const url: string = baseURL.replace(/\/$/, "") + "/files"; - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; @@ -1509,9 +1471,7 @@ export class OpenAI { 
this.sdkConfiguration.serverDefaults ); const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}/events", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; @@ -1575,9 +1535,7 @@ export class OpenAI { this.sdkConfiguration.serverDefaults ); const url: string = baseURL.replace(/\/$/, "") + "/fine-tunes"; - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; @@ -1648,9 +1606,7 @@ export class OpenAI { "/fine_tuning/jobs/{fine_tuning_job_id}/events", req ); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; @@ -1711,9 +1667,7 @@ export class OpenAI { this.sdkConfiguration.serverDefaults ); const url: string = baseURL.replace(/\/$/, "") + "/models"; - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; @@ -1780,9 +1734,7 @@ export class OpenAI { this.sdkConfiguration.serverDefaults ); const url: string = baseURL.replace(/\/$/, "") + "/fine_tuning/jobs"; - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; @@ -1850,9 +1802,7 @@ export class OpenAI { this.sdkConfiguration.serverDefaults ); const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; @@ -1920,9 +1870,7 @@ export class OpenAI { 
this.sdkConfiguration.serverDefaults ); const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; @@ -1992,9 +1940,7 @@ export class OpenAI { "/fine_tuning/jobs/{fine_tuning_job_id}", req ); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; @@ -2061,9 +2007,7 @@ export class OpenAI { this.sdkConfiguration.serverDefaults ); const url: string = utils.generateURL(baseURL, "/models/{model}", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; headers["Accept"] = "application/json"; diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 37766ca..35cb5b5 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -33,13 +33,12 @@ export type SDKProps = { export class SDKConfiguration { defaultClient: AxiosInstance; - securityClient: AxiosInstance; serverURL: string; serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.21.2"; - genVersion = "2.93.0"; + sdkVersion = "2.22.0"; + genVersion = "2.96.3"; public constructor(init?: Partial) { Object.assign(this, init); @@ -66,11 +65,8 @@ export class Gpt { } const defaultClient = props?.defaultClient ?? 
axios.create({ baseURL: serverURL }); - const securityClient = defaultClient; - this.sdkConfiguration = new SDKConfiguration({ defaultClient: defaultClient, - securityClient: securityClient, serverURL: serverURL, }); From 3d7d1c83389f48d0895975c070c54444f7303516 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 6 Sep 2023 00:54:40 +0000 Subject: [PATCH 47/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.78.8 --- README.md | 8 +- RELEASES.md | 12 +- USAGE.md | 6 +- .../operations/cancelfinetuningjobrequest.md | 6 +- docs/models/operations/deletefilerequest.md | 6 +- docs/models/operations/downloadfilerequest.md | 6 +- docs/models/operations/retrievefilerequest.md | 6 +- .../retrievefinetuningjobrequest.md | 6 +- docs/models/shared/finetuningjob.md | 30 +- docs/models/shared/security.md | 8 + docs/sdks/gpt/README.md | 2 +- docs/sdks/openai/README.md | 172 +++++++++-- files.gen | 2 + gen.yaml | 9 +- package-lock.json | 4 +- package.json | 2 +- .../models/operations/cancelfinetuningjob.ts | 2 +- src/sdk/models/operations/deletefile.ts | 2 +- src/sdk/models/operations/downloadfile.ts | 2 +- src/sdk/models/operations/retrievefile.ts | 2 +- .../operations/retrievefinetuningjob.ts | 2 +- src/sdk/models/shared/finetuningjob.ts | 8 +- src/sdk/models/shared/index.ts | 1 + src/sdk/models/shared/security.ts | 12 + src/sdk/openai.ts | 282 ++++++++++++++++-- src/sdk/sdk.ts | 13 +- 26 files changed, 498 insertions(+), 113 deletions(-) create mode 100755 docs/models/shared/security.md create mode 100755 src/sdk/models/shared/security.ts diff --git a/README.md b/README.md index e27544d..46a24f5 100755 --- a/README.md +++ b/README.md @@ -45,7 +45,11 @@ Authorization: Bearer YOUR_API_KEY import { Gpt } from "@speakeasy-api/openai"; import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.cancelFineTune({ fineTuneId: 
"ft-AF1WoRqd3aJAHsqc9NY7iL8F", @@ -93,7 +97,7 @@ Response includes details of the enqueued job including job status and the name * [createTranslation](docs/sdks/openai/README.md#createtranslation) - Translates audio into English. * [deleteFile](docs/sdks/openai/README.md#deletefile) - Delete a file. * [deleteModel](docs/sdks/openai/README.md#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. -* [downloadFile](docs/sdks/openai/README.md#downloadfile) - Returns the contents of the specified file +* [downloadFile](docs/sdks/openai/README.md#downloadfile) - Returns the contents of the specified file. * [listFiles](docs/sdks/openai/README.md#listfiles) - Returns a list of files that belong to the user's organization. * [~~listFineTuneEvents~~](docs/sdks/openai/README.md#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. :warning: **Deprecated** diff --git a/RELEASES.md b/RELEASES.md index 71aba60..9c86142 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -540,4 +540,14 @@ Based on: ### Generated - [typescript v2.22.0] . ### Releases -- [NPM v2.22.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.0 - . \ No newline at end of file +- [NPM v2.22.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.0 - . + +## 2023-09-06 00:54:15 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.78.8 (2.96.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.22.1] . +### Releases +- [NPM v2.22.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.1 - . 
\ No newline at end of file diff --git a/USAGE.md b/USAGE.md index c3e64a8..37c1178 100755 --- a/USAGE.md +++ b/USAGE.md @@ -5,7 +5,11 @@ import { Gpt } from "@speakeasy-api/openai"; import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.cancelFineTune({ fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", diff --git a/docs/models/operations/cancelfinetuningjobrequest.md b/docs/models/operations/cancelfinetuningjobrequest.md index 07bc63b..ec1e472 100755 --- a/docs/models/operations/cancelfinetuningjobrequest.md +++ b/docs/models/operations/cancelfinetuningjobrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | -| `fineTuningJobId` | *string* | :heavy_check_mark: | The ID of the fine-tuning job to cancel
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------- | ----------------------------------------- | ----------------------------------------- | ----------------------------------------- | ----------------------------------------- | +| `fineTuningJobId` | *string* | :heavy_check_mark: | The ID of the fine-tuning job to cancel.
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | \ No newline at end of file diff --git a/docs/models/operations/deletefilerequest.md b/docs/models/operations/deletefilerequest.md index 476343d..ee36ece 100755 --- a/docs/models/operations/deletefilerequest.md +++ b/docs/models/operations/deletefilerequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `fileId` | *string* | :heavy_check_mark: | The ID of the file to use for this request | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | +| `fileId` | *string* | :heavy_check_mark: | The ID of the file to use for this request. | \ No newline at end of file diff --git a/docs/models/operations/downloadfilerequest.md b/docs/models/operations/downloadfilerequest.md index fb50706..0167276 100755 --- a/docs/models/operations/downloadfilerequest.md +++ b/docs/models/operations/downloadfilerequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `fileId` | *string* | :heavy_check_mark: | The ID of the file to use for this request | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | +| `fileId` | *string* | :heavy_check_mark: | The ID of the file to use for this request. 
| \ No newline at end of file diff --git a/docs/models/operations/retrievefilerequest.md b/docs/models/operations/retrievefilerequest.md index 6bf8992..842fd4e 100755 --- a/docs/models/operations/retrievefilerequest.md +++ b/docs/models/operations/retrievefilerequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `fileId` | *string* | :heavy_check_mark: | The ID of the file to use for this request | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | +| `fileId` | *string* | :heavy_check_mark: | The ID of the file to use for this request. | \ No newline at end of file diff --git a/docs/models/operations/retrievefinetuningjobrequest.md b/docs/models/operations/retrievefinetuningjobrequest.md index b509088..559136c 100755 --- a/docs/models/operations/retrievefinetuningjobrequest.md +++ b/docs/models/operations/retrievefinetuningjobrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | -| `fineTuningJobId` | *string* | :heavy_check_mark: | The ID of the fine-tuning job
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | +| `fineTuningJobId` | *string* | :heavy_check_mark: | The ID of the fine-tuning job.
| ft-AF1WoRqd3aJAHsqc9NY7iL8F | \ No newline at end of file diff --git a/docs/models/shared/finetuningjob.md b/docs/models/shared/finetuningjob.md index fe5b4f1..e183c8b 100755 --- a/docs/models/shared/finetuningjob.md +++ b/docs/models/shared/finetuningjob.md @@ -6,18 +6,18 @@ The `fine_tuning.job` object represents a fine-tuning job that has been created ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -| `createdAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was created. | -| `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. | -| `finishedAt` | *number* | :heavy_minus_sign: | The Unix timestamp (in seconds) for when the fine-tuning job was finished. | -| `hyperparameters` | [FineTuningJobHyperparameters](../../models/shared/finetuningjobhyperparameters.md) | :heavy_check_mark: | The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. | -| `id` | *string* | :heavy_check_mark: | The object identifier, which can be referenced in the API endpoints. | -| `model` | *string* | :heavy_check_mark: | The base model that is being fine-tuned. | -| `object` | *string* | :heavy_check_mark: | The object type, which is always "fine_tuning.job". | -| `organizationId` | *string* | :heavy_check_mark: | The organization that owns the fine-tuning job. 
| -| `resultFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The compiled results files for the fine-tuning job. | -| `status` | *string* | :heavy_check_mark: | The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. | -| `trainedTokens` | *number* | :heavy_check_mark: | The total number of billable tokens processed by this fine tuning job. | -| `trainingFile` | *string* | :heavy_check_mark: | The file ID used for training. | -| `validationFile` | *string* | :heavy_check_mark: | The file ID used for validation. | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `createdAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was created. | +| `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. | +| `finishedAt` | *number* | :heavy_minus_sign: | The Unix timestamp (in seconds) for when the fine-tuning job was finished. | +| `hyperparameters` | [FineTuningJobHyperparameters](../../models/shared/finetuningjobhyperparameters.md) | :heavy_check_mark: | The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
| +| `id` | *string* | :heavy_check_mark: | The object identifier, which can be referenced in the API endpoints. | +| `model` | *string* | :heavy_check_mark: | The base model that is being fine-tuned. | +| `object` | *string* | :heavy_check_mark: | The object type, which is always "fine_tuning.job". | +| `organizationId` | *string* | :heavy_check_mark: | The organization that owns the fine-tuning job. | +| `resultFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). | +| `status` | *string* | :heavy_check_mark: | The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. | +| `trainedTokens` | *number* | :heavy_check_mark: | The total number of billable tokens processed by this fine-tuning job. | +| `trainingFile` | *string* | :heavy_check_mark: | The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). | +| `validationFile` | *string* | :heavy_check_mark: | The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). 
| \ No newline at end of file diff --git a/docs/models/shared/security.md b/docs/models/shared/security.md new file mode 100755 index 0000000..1132c5c --- /dev/null +++ b/docs/models/shared/security.md @@ -0,0 +1,8 @@ +# Security + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | +| `apiKeyAuth` | *string* | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/sdks/gpt/README.md b/docs/sdks/gpt/README.md index 411338d..b26d90d 100755 --- a/docs/sdks/gpt/README.md +++ b/docs/sdks/gpt/README.md @@ -2,7 +2,7 @@ ## Overview -OpenAI API: APIs for sampling from and fine-tuning language models +OpenAI API: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. ### Available Operations diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index ad63d9d..2673365 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -36,7 +36,7 @@ Response includes details of the enqueued job including job status and the name * [createTranslation](#createtranslation) - Translates audio into English. * [deleteFile](#deletefile) - Delete a file. * [deleteModel](#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. -* [downloadFile](#downloadfile) - Returns the contents of the specified file +* [downloadFile](#downloadfile) - Returns the contents of the specified file. * [listFiles](#listfiles) - Returns a list of files that belong to the user's organization. * [~~listFineTuneEvents~~](#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. :warning: **Deprecated** @@ -71,7 +71,11 @@ Immediately cancel a fine-tune job. 
import { Gpt } from "@speakeasy-api/openai"; import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.cancelFineTune({ fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", @@ -106,7 +110,11 @@ Immediately cancel a fine-tune job. import { Gpt } from "@speakeasy-api/openai"; import { CancelFineTuningJobResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.cancelFineTuningJob({ fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", @@ -145,7 +153,11 @@ import { CreateChatCompletionRequestModel2, } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createChatCompletion({ frequencyPenalty: 5488.14, @@ -251,7 +263,11 @@ import { Gpt } from "@speakeasy-api/openai"; import { CreateCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateCompletionRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createCompletion({ bestOf: 210382, @@ -306,7 +322,11 @@ import { Gpt } from "@speakeasy-api/openai"; import { CreateEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateEditRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createEdit({ input: "What day of the wek is it?", @@ -346,7 +366,11 @@ import { Gpt } from "@speakeasy-api/openai"; import { CreateEmbeddingResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateEmbeddingRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk 
= new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createEmbedding({ input: [ @@ -398,7 +422,11 @@ Upload a file that contains document(s) to be used across various endpoints/feat import { Gpt } from "@speakeasy-api/openai"; import { CreateFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createFile({ file: { @@ -444,7 +472,11 @@ import { Gpt } from "@speakeasy-api/openai"; import { CreateFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateFineTuneRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createFineTune({ batchSize: 158969, @@ -501,7 +533,11 @@ import { CreateFineTuningJobRequestModel2, } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createFineTuningJob({ hyperparameters: { @@ -542,7 +578,11 @@ import { Gpt } from "@speakeasy-api/openai"; import { CreateImageResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateImageRequestResponseFormat, CreateImageRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createImage({ n: 1, @@ -581,7 +621,11 @@ import { Gpt } from "@speakeasy-api/openai"; import { CreateImageEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateImageEditRequestResponseFormat, CreateImageEditRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createImageEdit({ image: { @@ -628,7 +672,11 @@ import { Gpt } from "@speakeasy-api/openai"; import { 
CreateImageVariationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateImageVariationRequestResponseFormat, CreateImageVariationRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createImageVariation({ image: { @@ -670,7 +718,11 @@ import { Gpt } from "@speakeasy-api/openai"; import { CreateModerationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateModerationRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createModeration({ input: [ @@ -709,7 +761,11 @@ import { Gpt } from "@speakeasy-api/openai"; import { CreateTranscriptionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateTranscriptionRequestModel2, CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createTranscription({ file: { @@ -752,7 +808,11 @@ import { Gpt } from "@speakeasy-api/openai"; import { CreateTranslationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateTranslationRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.createTranslation({ file: { @@ -793,7 +853,11 @@ Delete a file. import { Gpt } from "@speakeasy-api/openai"; import { DeleteFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.deleteFile({ fileId: "iusto", @@ -827,7 +891,11 @@ Delete a fine-tuned model. 
You must have the Owner role in your organization to import { Gpt } from "@speakeasy-api/openai"; import { DeleteModelResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.deleteModel({ model: "ft:gpt-3.5-turbo:acemeco:suffix:abc123", @@ -853,7 +921,7 @@ sdk.openAI.deleteModel({ ## downloadFile -Returns the contents of the specified file +Returns the contents of the specified file. ### Example Usage @@ -861,7 +929,11 @@ Returns the contents of the specified file import { Gpt } from "@speakeasy-api/openai"; import { DownloadFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.downloadFile({ fileId: "dicta", @@ -895,7 +967,11 @@ Returns a list of files that belong to the user's organization. import { Gpt } from "@speakeasy-api/openai"; import { ListFilesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.listFiles().then((res: ListFilesResponse) => { if (res.statusCode == 200) { @@ -929,7 +1005,11 @@ Get fine-grained status updates for a fine-tune job. 
import { Gpt } from "@speakeasy-api/openai"; import { ListFineTuneEventsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.listFineTuneEvents({ fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", @@ -967,7 +1047,11 @@ List your organization's fine-tuning jobs import { Gpt } from "@speakeasy-api/openai"; import { ListFineTunesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.listFineTunes().then((res: ListFineTunesResponse) => { if (res.statusCode == 200) { @@ -999,7 +1083,11 @@ Get status updates for a fine-tuning job. import { Gpt } from "@speakeasy-api/openai"; import { ListFineTuningEventsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.listFineTuningEvents({ after: "harum", @@ -1035,7 +1123,11 @@ Lists the currently available models, and provides basic information about each import { Gpt } from "@speakeasy-api/openai"; import { ListModelsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.listModels().then((res: ListModelsResponse) => { if (res.statusCode == 200) { @@ -1067,7 +1159,11 @@ List your organization's fine-tuning jobs import { Gpt } from "@speakeasy-api/openai"; import { ListPaginatedFineTuningJobsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.listPaginatedFineTuningJobs({ after: "accusamus", @@ -1102,7 +1198,11 @@ Returns information about a specific file. 
import { Gpt } from "@speakeasy-api/openai"; import { RetrieveFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.retrieveFile({ fileId: "repudiandae", @@ -1141,7 +1241,11 @@ Gets info about the fine-tune job. import { Gpt } from "@speakeasy-api/openai"; import { RetrieveFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.retrieveFineTune({ fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", @@ -1178,7 +1282,11 @@ Get info about a fine-tuning job. import { Gpt } from "@speakeasy-api/openai"; import { RetrieveFineTuningJobResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.retrieveFineTuningJob({ fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", @@ -1212,7 +1320,11 @@ Retrieves a model instance, providing basic information about the model such as import { Gpt } from "@speakeasy-api/openai"; import { RetrieveModelResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt(); +const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, +}); sdk.openAI.retrieveModel({ model: "gpt-3.5-turbo", diff --git a/files.gen b/files.gen index adf8e93..45dbfaa 100755 --- a/files.gen +++ b/files.gen @@ -90,6 +90,7 @@ src/sdk/models/shared/finetuningjobevent.ts src/sdk/models/shared/listmodelsresponse.ts src/sdk/models/shared/model.ts src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts +src/sdk/models/shared/security.ts src/sdk/models/shared/index.ts src/sdk/models/errors/index.ts docs/sdks/gpt/README.md @@ -221,4 +222,5 @@ docs/models/shared/finetuningjobevent.md docs/models/shared/listmodelsresponse.md docs/models/shared/model.md docs/models/shared/listpaginatedfinetuningjobsresponse.md 
+docs/models/shared/security.md .gitattributes \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index afdc3c7..5d26019 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: b9d254ee51060898a4e93f4eea8fcef1 + docChecksum: 6b9863d82dd2ad263778aa831eac9296 docVersion: 2.0.0 - speakeasyVersion: 1.78.3 - generationVersion: 2.96.3 + speakeasyVersion: 1.78.8 + generationVersion: 2.96.6 generation: sdkClassName: gpt sdkFlattening: true @@ -13,9 +13,10 @@ features: typescript: core: 2.87.0 deprecations: 2.81.1 + globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.22.0 + version: 2.22.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 1dbc436..997c6a1 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.0", + "version": "2.22.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.22.0", + "version": "2.22.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 48a931b..2f8d363 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.0", + "version": "2.22.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/operations/cancelfinetuningjob.ts b/src/sdk/models/operations/cancelfinetuningjob.ts index 9aa60f7..06601a7 100755 --- a/src/sdk/models/operations/cancelfinetuningjob.ts +++ b/src/sdk/models/operations/cancelfinetuningjob.ts @@ -8,7 +8,7 @@ import { AxiosResponse } from "axios"; export class CancelFineTuningJobRequest extends SpeakeasyBase { /** - * The ID of the fine-tuning job to cancel + * The ID of the fine-tuning job to cancel. 
* * @remarks * diff --git a/src/sdk/models/operations/deletefile.ts b/src/sdk/models/operations/deletefile.ts index c0dd90b..751df75 100755 --- a/src/sdk/models/operations/deletefile.ts +++ b/src/sdk/models/operations/deletefile.ts @@ -8,7 +8,7 @@ import { AxiosResponse } from "axios"; export class DeleteFileRequest extends SpeakeasyBase { /** - * The ID of the file to use for this request + * The ID of the file to use for this request. */ @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=file_id" }) fileId: string; diff --git a/src/sdk/models/operations/downloadfile.ts b/src/sdk/models/operations/downloadfile.ts index 365ae12..dabdf89 100755 --- a/src/sdk/models/operations/downloadfile.ts +++ b/src/sdk/models/operations/downloadfile.ts @@ -7,7 +7,7 @@ import { AxiosResponse } from "axios"; export class DownloadFileRequest extends SpeakeasyBase { /** - * The ID of the file to use for this request + * The ID of the file to use for this request. */ @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=file_id" }) fileId: string; diff --git a/src/sdk/models/operations/retrievefile.ts b/src/sdk/models/operations/retrievefile.ts index 0c443f9..be3a1cf 100755 --- a/src/sdk/models/operations/retrievefile.ts +++ b/src/sdk/models/operations/retrievefile.ts @@ -8,7 +8,7 @@ import { AxiosResponse } from "axios"; export class RetrieveFileRequest extends SpeakeasyBase { /** - * The ID of the file to use for this request + * The ID of the file to use for this request. 
*/ @SpeakeasyMetadata({ data: "pathParam, style=simple;explode=false;name=file_id" }) fileId: string; diff --git a/src/sdk/models/operations/retrievefinetuningjob.ts b/src/sdk/models/operations/retrievefinetuningjob.ts index 917a8a2..b32ee91 100755 --- a/src/sdk/models/operations/retrievefinetuningjob.ts +++ b/src/sdk/models/operations/retrievefinetuningjob.ts @@ -8,7 +8,7 @@ import { AxiosResponse } from "axios"; export class RetrieveFineTuningJobRequest extends SpeakeasyBase { /** - * The ID of the fine-tuning job + * The ID of the fine-tuning job. * * @remarks * diff --git a/src/sdk/models/shared/finetuningjob.ts b/src/sdk/models/shared/finetuningjob.ts index fe96192..971d222 100755 --- a/src/sdk/models/shared/finetuningjob.ts +++ b/src/sdk/models/shared/finetuningjob.ts @@ -96,7 +96,7 @@ export class FineTuningJob extends SpeakeasyBase { organizationId: string; /** - * The compiled results files for the fine-tuning job. + * The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). */ @SpeakeasyMetadata({ elemType: OpenAIFile }) @Expose({ name: "result_files" }) @@ -111,21 +111,21 @@ export class FineTuningJob extends SpeakeasyBase { status: string; /** - * The total number of billable tokens processed by this fine tuning job. + * The total number of billable tokens processed by this fine-tuning job. */ @SpeakeasyMetadata() @Expose({ name: "trained_tokens" }) trainedTokens: number; /** - * The file ID used for training. + * The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). */ @SpeakeasyMetadata() @Expose({ name: "training_file" }) trainingFile: string; /** - * The file ID used for validation. + * The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). 
*/ @SpeakeasyMetadata() @Expose({ name: "validation_file" }) diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index 4ea6256..02dca0f 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -44,3 +44,4 @@ export * from "./listmodelsresponse"; export * from "./listpaginatedfinetuningjobsresponse"; export * from "./model"; export * from "./openaifile"; +export * from "./security"; diff --git a/src/sdk/models/shared/security.ts b/src/sdk/models/shared/security.ts new file mode 100755 index 0000000..a447dc5 --- /dev/null +++ b/src/sdk/models/shared/security.ts @@ -0,0 +1,12 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; + +export class Security extends SpeakeasyBase { + @SpeakeasyMetadata({ + data: "security, scheme=true;type=http;subtype=bearer;name=Authorization", + }) + apiKeyAuth: string; +} diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index 62875e0..0c0c9ee 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -40,7 +40,15 @@ export class OpenAI { ); const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}/cancel", req); const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -108,7 +116,15 @@ export class OpenAI { req ); const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = 
this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -185,7 +201,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -265,7 +289,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -346,7 +378,15 @@ export class OpenAI { } } const client: AxiosInstance = 
this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -425,7 +465,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -505,7 +553,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody 
== null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -588,7 +644,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -669,7 +733,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -749,7 +821,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + 
globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -828,7 +908,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -907,7 +995,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -987,7 +1083,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers 
}; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -1066,7 +1170,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -1146,7 +1258,15 @@ export class OpenAI { } } const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...reqBodyHeaders, ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; if (reqBody == null || Object.keys(reqBody).length === 0) throw new Error("request body is required"); 
headers["Accept"] = "application/json"; @@ -1215,7 +1335,15 @@ export class OpenAI { ); const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1281,7 +1409,15 @@ export class OpenAI { ); const url: string = utils.generateURL(baseURL, "/models/{model}", req); const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1331,7 +1467,7 @@ export class OpenAI { } /** - * Returns the contents of the specified file + * Returns the contents of the specified file. 
*/ async downloadFile( req: operations.DownloadFileRequest, @@ -1347,7 +1483,15 @@ export class OpenAI { ); const url: string = utils.generateURL(baseURL, "/files/{file_id}/content", req); const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1403,7 +1547,15 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/files"; const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1472,7 +1624,15 @@ export class OpenAI { ); const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}/events", req); const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const 
headers = { ...config?.headers, ...properties.headers }; const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; @@ -1536,7 +1696,15 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/fine-tunes"; const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1607,7 +1775,15 @@ export class OpenAI { req ); const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; @@ -1668,7 +1844,15 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/models"; const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = 
utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1735,7 +1919,15 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/fine_tuning/jobs"; const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; @@ -1803,7 +1995,15 @@ export class OpenAI { ); const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1871,7 +2071,15 @@ export class OpenAI { ); const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}", req); const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof 
utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1941,7 +2149,15 @@ export class OpenAI { req ); const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -2008,7 +2224,15 @@ export class OpenAI { ); const url: string = utils.generateURL(baseURL, "/models/{model}", req); const client: AxiosInstance = this.sdkConfiguration.defaultClient; - const headers = { ...config?.headers }; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 35cb5b5..52b9936 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -2,6 +2,7 @@ * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
*/ +import * as shared from "./models/shared"; import { OpenAI } from "./openai"; import axios from "axios"; import { AxiosInstance } from "axios"; @@ -15,6 +16,10 @@ export const ServerList = ["https://api.openai.com/v1"] as const; * The available configuration options for the SDK */ export type SDKProps = { + /** + * The security details required to authenticate the SDK + */ + security?: shared.Security | (() => Promise); /** * Allows overriding the default axios client used by the SDK */ @@ -33,12 +38,13 @@ export type SDKProps = { export class SDKConfiguration { defaultClient: AxiosInstance; + security?: shared.Security | (() => Promise); serverURL: string; serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.22.0"; - genVersion = "2.96.3"; + sdkVersion = "2.22.1"; + genVersion = "2.96.6"; public constructor(init?: Partial) { Object.assign(this, init); @@ -46,7 +52,7 @@ export class SDKConfiguration { } /** - * OpenAI API: APIs for sampling from and fine-tuning language models + * OpenAI API: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. */ export class Gpt { /** @@ -67,6 +73,7 @@ export class Gpt { const defaultClient = props?.defaultClient ?? 
axios.create({ baseURL: serverURL }); this.sdkConfiguration = new SDKConfiguration({ defaultClient: defaultClient, + security: props?.security, serverURL: serverURL, }); From 41bbb7df0e5e88ee3481e6b40c1b13023e10e3fc Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 8 Sep 2023 00:54:10 +0000 Subject: [PATCH 48/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeasy CLI 1.82.0 --- RELEASES.md | 12 +- docs/models/shared/createeditresponse.md | 2 - docs/models/shared/createembeddingresponse.md | 2 - .../shared/createtranscriptionresponse.md | 2 - .../shared/createtranslationresponse.md | 2 - docs/models/shared/deletefileresponse.md | 2 - docs/models/shared/deletemodelresponse.md | 2 - docs/models/shared/finetuningjob.md | 8 +- docs/models/shared/imagesresponse.md | 2 - docs/models/shared/listfilesresponse.md | 2 - .../shared/listfinetuneeventsresponse.md | 2 - docs/models/shared/listfinetunesresponse.md | 2 - .../shared/listfinetuningjobeventsresponse.md | 2 - docs/models/shared/listmodelsresponse.md | 2 - .../listpaginatedfinetuningjobsresponse.md | 2 - docs/sdks/openai/README.md | 182 +++++++----------- gen.yaml | 8 +- package-lock.json | 4 +- package.json | 2 +- src/sdk/models/shared/createeditresponse.ts | 2 - .../models/shared/createembeddingresponse.ts | 3 - .../shared/createtranscriptionresponse.ts | 3 - .../shared/createtranslationresponse.ts | 3 - src/sdk/models/shared/deletefileresponse.ts | 3 - src/sdk/models/shared/deletemodelresponse.ts | 3 - src/sdk/models/shared/finetuningjob.ts | 12 +- src/sdk/models/shared/imagesresponse.ts | 3 - src/sdk/models/shared/listfilesresponse.ts | 3 - .../shared/listfinetuneeventsresponse.ts | 3 - .../models/shared/listfinetunesresponse.ts | 3 - .../shared/listfinetuningjobeventsresponse.ts | 3 - src/sdk/models/shared/listmodelsresponse.ts | 3 - .../listpaginatedfinetuningjobsresponse.ts | 3 - src/sdk/sdk.ts | 11 +- 34 files changed, 102 insertions(+), 201 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 
9c86142..c273469 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -550,4 +550,14 @@ Based on: ### Generated - [typescript v2.22.1] . ### Releases -- [NPM v2.22.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.1 - . \ No newline at end of file +- [NPM v2.22.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.1 - . + +## 2023-09-08 00:53:45 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.82.0 (2.107.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.22.2] . +### Releases +- [NPM v2.22.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.2 - . \ No newline at end of file diff --git a/docs/models/shared/createeditresponse.md b/docs/models/shared/createeditresponse.md index 22c1e0c..d093b37 100755 --- a/docs/models/shared/createeditresponse.md +++ b/docs/models/shared/createeditresponse.md @@ -1,7 +1,5 @@ # ~~CreateEditResponse~~ -OK - > :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. 
diff --git a/docs/models/shared/createembeddingresponse.md b/docs/models/shared/createembeddingresponse.md index 18cc762..60a2457 100755 --- a/docs/models/shared/createembeddingresponse.md +++ b/docs/models/shared/createembeddingresponse.md @@ -1,7 +1,5 @@ # CreateEmbeddingResponse -OK - ## Fields diff --git a/docs/models/shared/createtranscriptionresponse.md b/docs/models/shared/createtranscriptionresponse.md index 87de4e9..873ed5c 100755 --- a/docs/models/shared/createtranscriptionresponse.md +++ b/docs/models/shared/createtranscriptionresponse.md @@ -1,7 +1,5 @@ # CreateTranscriptionResponse -OK - ## Fields diff --git a/docs/models/shared/createtranslationresponse.md b/docs/models/shared/createtranslationresponse.md index 13a17f7..5aedb8c 100755 --- a/docs/models/shared/createtranslationresponse.md +++ b/docs/models/shared/createtranslationresponse.md @@ -1,7 +1,5 @@ # CreateTranslationResponse -OK - ## Fields diff --git a/docs/models/shared/deletefileresponse.md b/docs/models/shared/deletefileresponse.md index 4ea35c1..a227769 100755 --- a/docs/models/shared/deletefileresponse.md +++ b/docs/models/shared/deletefileresponse.md @@ -1,7 +1,5 @@ # DeleteFileResponse -OK - ## Fields diff --git a/docs/models/shared/deletemodelresponse.md b/docs/models/shared/deletemodelresponse.md index 2344b1b..8886eb5 100755 --- a/docs/models/shared/deletemodelresponse.md +++ b/docs/models/shared/deletemodelresponse.md @@ -1,7 +1,5 @@ # DeleteModelResponse -OK - ## Fields diff --git a/docs/models/shared/finetuningjob.md b/docs/models/shared/finetuningjob.md index e183c8b..a7cb05d 100755 --- a/docs/models/shared/finetuningjob.md +++ b/docs/models/shared/finetuningjob.md @@ -9,15 +9,15 @@ The `fine_tuning.job` object represents a fine-tuning job that has been created | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | | `createdAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was created. | -| `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. | -| `finishedAt` | *number* | :heavy_minus_sign: | The Unix timestamp (in seconds) for when the fine-tuning job was finished. | +| `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. | +| `finishedAt` | *number* | :heavy_minus_sign: | The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. | | `hyperparameters` | [FineTuningJobHyperparameters](../../models/shared/finetuningjobhyperparameters.md) | :heavy_check_mark: | The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. | | `id` | *string* | :heavy_check_mark: | The object identifier, which can be referenced in the API endpoints. | | `model` | *string* | :heavy_check_mark: | The base model that is being fine-tuned. | | `object` | *string* | :heavy_check_mark: | The object type, which is always "fine_tuning.job". | | `organizationId` | *string* | :heavy_check_mark: | The organization that owns the fine-tuning job. | -| `resultFiles` | [OpenAIFile](../../models/shared/openaifile.md)[] | :heavy_check_mark: | The compiled results file ID(s) for the fine-tuning job. 
You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). | +| `resultFiles` | *string*[] | :heavy_check_mark: | The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). | | `status` | *string* | :heavy_check_mark: | The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. | -| `trainedTokens` | *number* | :heavy_check_mark: | The total number of billable tokens processed by this fine-tuning job. | +| `trainedTokens` | *number* | :heavy_check_mark: | The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. | | `trainingFile` | *string* | :heavy_check_mark: | The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). | | `validationFile` | *string* | :heavy_check_mark: | The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). 
| \ No newline at end of file diff --git a/docs/models/shared/imagesresponse.md b/docs/models/shared/imagesresponse.md index ed03dba..c74080a 100755 --- a/docs/models/shared/imagesresponse.md +++ b/docs/models/shared/imagesresponse.md @@ -1,7 +1,5 @@ # ImagesResponse -OK - ## Fields diff --git a/docs/models/shared/listfilesresponse.md b/docs/models/shared/listfilesresponse.md index 33b46cb..6e97313 100755 --- a/docs/models/shared/listfilesresponse.md +++ b/docs/models/shared/listfilesresponse.md @@ -1,7 +1,5 @@ # ListFilesResponse -OK - ## Fields diff --git a/docs/models/shared/listfinetuneeventsresponse.md b/docs/models/shared/listfinetuneeventsresponse.md index 327a8b5..057b80e 100755 --- a/docs/models/shared/listfinetuneeventsresponse.md +++ b/docs/models/shared/listfinetuneeventsresponse.md @@ -1,7 +1,5 @@ # ListFineTuneEventsResponse -OK - ## Fields diff --git a/docs/models/shared/listfinetunesresponse.md b/docs/models/shared/listfinetunesresponse.md index 509428c..ccef26b 100755 --- a/docs/models/shared/listfinetunesresponse.md +++ b/docs/models/shared/listfinetunesresponse.md @@ -1,7 +1,5 @@ # ListFineTunesResponse -OK - ## Fields diff --git a/docs/models/shared/listfinetuningjobeventsresponse.md b/docs/models/shared/listfinetuningjobeventsresponse.md index 77787a5..4f354d1 100755 --- a/docs/models/shared/listfinetuningjobeventsresponse.md +++ b/docs/models/shared/listfinetuningjobeventsresponse.md @@ -1,7 +1,5 @@ # ListFineTuningJobEventsResponse -OK - ## Fields diff --git a/docs/models/shared/listmodelsresponse.md b/docs/models/shared/listmodelsresponse.md index b73fcce..1392538 100755 --- a/docs/models/shared/listmodelsresponse.md +++ b/docs/models/shared/listmodelsresponse.md @@ -1,7 +1,5 @@ # ListModelsResponse -OK - ## Fields diff --git a/docs/models/shared/listpaginatedfinetuningjobsresponse.md b/docs/models/shared/listpaginatedfinetuningjobsresponse.md index b37d45e..73950f2 100755 --- a/docs/models/shared/listpaginatedfinetuningjobsresponse.md +++ 
b/docs/models/shared/listpaginatedfinetuningjobsresponse.md @@ -1,7 +1,5 @@ # ListPaginatedFineTuningJobsResponse -OK - ## Fields diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index 2673365..a4b1e81 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -166,67 +166,33 @@ sdk.openAI.createChatCompletion({ }, functions: [ { - description: "vel", - name: "Doug Hoppe", + description: "illum", + name: "Sabrina Oberbrunner", parameters: { - "ipsa": "delectus", - "tempora": "suscipit", - "molestiae": "minus", - "placeat": "voluptatum", - }, - }, - { - description: "iusto", - name: "Charlie Walsh II", - parameters: { - "deserunt": "perferendis", - }, - }, - { - description: "ipsam", - name: "Timmy Satterfield", - parameters: { - "maiores": "molestiae", - "quod": "quod", - "esse": "totam", - "porro": "dolorum", - }, - }, - { - description: "dicta", - name: "Luke McCullough", - parameters: { - "optio": "totam", - "beatae": "commodi", - "molestiae": "modi", - "qui": "impedit", + "magnam": "debitis", }, }, ], logitBias: { - "esse": 216550, - "excepturi": 135218, - "perferendis": 324141, + "ipsa": 963663, }, - maxTokens: 617636, + maxTokens: 272656, messages: [ { - content: "iste", + content: "suscipit", functionCall: { - arguments: "dolor", - name: "Lester Welch", + arguments: "molestiae", + name: "Irving Lehner", }, - name: "Stacy Moore", - role: ChatCompletionRequestMessageRole.Assistant, + name: "Mrs. 
Sophie Smith MD", + role: ChatCompletionRequestMessageRole.System, }, ], model: "gpt-3.5-turbo", n: 1, - presencePenalty: 602.25, + presencePenalty: 8326.2, stop: [ - "mollitia", - "laborum", - "dolores", + "quo", ], stream: false, temperature: 1, @@ -270,20 +236,23 @@ const sdk = new Gpt({ }); sdk.openAI.createCompletion({ - bestOf: 210382, + bestOf: 140350, echo: false, - frequencyPenalty: 3581.52, + frequencyPenalty: 8700.13, logitBias: { - "nobis": 315428, + "at": 978619, }, - logprobs: 607831, + logprobs: 473608, maxTokens: 16, - model: "minima", + model: CreateCompletionRequestModel2.TextBabbage001, n: 1, - presencePenalty: 5701.97, - prompt: "This is a test.", - stop: " -", + presencePenalty: 4614.79, + prompt: [ + 780529, + ], + stop: [ + "["\n"]", + ], stream: false, suffix: "test.", temperature: 1, @@ -331,7 +300,7 @@ const sdk = new Gpt({ sdk.openAI.createEdit({ input: "What day of the wek is it?", instruction: "Fix the spelling mistakes.", - model: CreateEditRequestModel2.TextDavinciEdit001, + model: "text-davinci-edit-001", n: 1, temperature: 1, topP: 1, @@ -374,22 +343,9 @@ const sdk = new Gpt({ sdk.openAI.createEmbedding({ input: [ - [ - 652790, - ], - [ - 635059, - ], - [ - 995300, - ], - [ - 581850, - 253291, - 414369, - ], + 639921, ], - model: "text-embedding-ada-002", + model: CreateEmbeddingRequestModel2.TextEmbeddingAda002, user: "user-1234", }).then((res: CreateEmbeddingResponse) => { if (res.statusCode == 200) { @@ -430,10 +386,10 @@ const sdk = new Gpt({ sdk.openAI.createFile({ file: { - content: "molestiae".encode(), - file: "velit", + content: "fugit".encode(), + file: "deleniti", }, - purpose: "error", + purpose: "hic", }).then((res: CreateFileResponse) => { if (res.statusCode == 200) { // handle response @@ -479,19 +435,18 @@ const sdk = new Gpt({ }); sdk.openAI.createFineTune({ - batchSize: 158969, + batchSize: 758616, classificationBetas: [ - 1103.75, - 6747.52, + 5218.48, ], - classificationNClasses: 656330, - 
classificationPositiveClass: "enim", + classificationNClasses: 105907, + classificationPositiveClass: "commodi", computeClassificationMetrics: false, - learningRateMultiplier: 1381.83, - model: CreateFineTuneRequestModel2.Curie, - nEpochs: 196582, - promptLossWeight: 9495.72, - suffix: "ipsam", + learningRateMultiplier: 4736, + model: "curie", + nEpochs: 186332, + promptLossWeight: 7742.34, + suffix: "cum", trainingFile: "file-abc123", validationFile: "file-abc123", }).then((res: CreateFineTuneResponse) => { @@ -541,10 +496,10 @@ const sdk = new Gpt({ sdk.openAI.createFineTuningJob({ hyperparameters: { - nEpochs: 820994, + nEpochs: CreateFineTuningJobRequestHyperparametersNEpochs1.Auto, }, model: "gpt-3.5-turbo", - suffix: "quasi", + suffix: "excepturi", trainingFile: "file-abc123", validationFile: "file-abc123", }).then((res: CreateFineTuningJobResponse) => { @@ -629,12 +584,12 @@ const sdk = new Gpt({ sdk.openAI.createImageEdit({ image: { - content: "error".encode(), - image: "temporibus", + content: "aspernatur".encode(), + image: "perferendis", }, mask: { - content: "laborum".encode(), - mask: "quasi", + content: "ad".encode(), + mask: "natus", }, n: 1, prompt: "A cute baby sea otter wearing a beret", @@ -680,8 +635,8 @@ const sdk = new Gpt({ sdk.openAI.createImageVariation({ image: { - content: "reiciendis".encode(), - image: "voluptatibus", + content: "sed".encode(), + image: "iste", }, n: 1, responseFormat: CreateImageVariationRequestResponseFormat.Url, @@ -725,10 +680,7 @@ const sdk = new Gpt({ }); sdk.openAI.createModeration({ - input: [ - "I want to kill them.", - "I want to kill them.", - ], + input: "I want to kill them.", model: CreateModerationRequestModel2.TextModerationStable, }).then((res: CreateModerationResponse) => { if (res.statusCode == 200) { @@ -769,14 +721,14 @@ const sdk = new Gpt({ sdk.openAI.createTranscription({ file: { - content: "voluptatibus".encode(), - file: "ipsa", + content: "laboriosam".encode(), + file: "hic", }, - language: 
"omnis", - model: "whisper-1", - prompt: "cum", - responseFormat: CreateTranscriptionRequestResponseFormat.Json, - temperature: 391.87, + language: "saepe", + model: CreateTranscriptionRequestModel2.Whisper1, + prompt: "in", + responseFormat: CreateTranscriptionRequestResponseFormat.Text, + temperature: 6130.64, }).then((res: CreateTranscriptionResponse) => { if (res.statusCode == 200) { // handle response @@ -816,13 +768,13 @@ const sdk = new Gpt({ sdk.openAI.createTranslation({ file: { - content: "reprehenderit".encode(), - file: "ut", + content: "iure".encode(), + file: "saepe", }, model: CreateTranslationRequestModel2.Whisper1, - prompt: "dicta", - responseFormat: "corporis", - temperature: 2961.4, + prompt: "architecto", + responseFormat: "ipsa", + temperature: 9698.1, }).then((res: CreateTranslationResponse) => { if (res.statusCode == 200) { // handle response @@ -860,7 +812,7 @@ const sdk = new Gpt({ }); sdk.openAI.deleteFile({ - fileId: "iusto", + fileId: "est", }).then((res: DeleteFileResponse) => { if (res.statusCode == 200) { // handle response @@ -936,7 +888,7 @@ const sdk = new Gpt({ }); sdk.openAI.downloadFile({ - fileId: "dicta", + fileId: "mollitia", }).then((res: DownloadFileResponse) => { if (res.statusCode == 200) { // handle response @@ -1090,9 +1042,9 @@ const sdk = new Gpt({ }); sdk.openAI.listFineTuningEvents({ - after: "harum", + after: "laborum", fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - limit: 317983, + limit: 170909, }).then((res: ListFineTuningEventsResponse) => { if (res.statusCode == 200) { // handle response @@ -1166,8 +1118,8 @@ const sdk = new Gpt({ }); sdk.openAI.listPaginatedFineTuningJobs({ - after: "accusamus", - limit: 414263, + after: "dolorem", + limit: 358152, }).then((res: ListPaginatedFineTuningJobsResponse) => { if (res.statusCode == 200) { // handle response @@ -1205,7 +1157,7 @@ const sdk = new Gpt({ }); sdk.openAI.retrieveFile({ - fileId: "repudiandae", + fileId: "explicabo", }).then((res: RetrieveFileResponse) 
=> { if (res.statusCode == 200) { // handle response diff --git a/gen.yaml b/gen.yaml index 5d26019..799656c 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: 6b9863d82dd2ad263778aa831eac9296 + docChecksum: 23fb01ce67ab1cc9ea6a631a0de21c44 docVersion: 2.0.0 - speakeasyVersion: 1.78.8 - generationVersion: 2.96.6 + speakeasyVersion: 1.82.0 + generationVersion: 2.107.0 generation: sdkClassName: gpt sdkFlattening: true @@ -16,7 +16,7 @@ features: globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.22.1 + version: 2.22.2 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 997c6a1..b30acd4 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.1", + "version": "2.22.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.22.1", + "version": "2.22.2", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 2f8d363..e37151d 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.1", + "version": "2.22.2", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/createeditresponse.ts b/src/sdk/models/shared/createeditresponse.ts index 10c1504..0b38720 100755 --- a/src/sdk/models/shared/createeditresponse.ts +++ b/src/sdk/models/shared/createeditresponse.ts @@ -46,8 +46,6 @@ export class CreateEditResponseChoices extends SpeakeasyBase { } /** - * OK - * * @deprecated class: This will be removed in a future release, please migrate away from it as soon as possible. 
*/ export class CreateEditResponse extends SpeakeasyBase { diff --git a/src/sdk/models/shared/createembeddingresponse.ts b/src/sdk/models/shared/createembeddingresponse.ts index 3cea575..394bff5 100755 --- a/src/sdk/models/shared/createembeddingresponse.ts +++ b/src/sdk/models/shared/createembeddingresponse.ts @@ -25,9 +25,6 @@ export class CreateEmbeddingResponseUsage extends SpeakeasyBase { totalTokens: number; } -/** - * OK - */ export class CreateEmbeddingResponse extends SpeakeasyBase { /** * The list of embeddings generated by the model. diff --git a/src/sdk/models/shared/createtranscriptionresponse.ts b/src/sdk/models/shared/createtranscriptionresponse.ts index 12a63c9..7da4e94 100755 --- a/src/sdk/models/shared/createtranscriptionresponse.ts +++ b/src/sdk/models/shared/createtranscriptionresponse.ts @@ -5,9 +5,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; -/** - * OK - */ export class CreateTranscriptionResponse extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "text" }) diff --git a/src/sdk/models/shared/createtranslationresponse.ts b/src/sdk/models/shared/createtranslationresponse.ts index b299cbd..a7a92b0 100755 --- a/src/sdk/models/shared/createtranslationresponse.ts +++ b/src/sdk/models/shared/createtranslationresponse.ts @@ -5,9 +5,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; -/** - * OK - */ export class CreateTranslationResponse extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "text" }) diff --git a/src/sdk/models/shared/deletefileresponse.ts b/src/sdk/models/shared/deletefileresponse.ts index 30e9647..9d4d2d6 100755 --- a/src/sdk/models/shared/deletefileresponse.ts +++ b/src/sdk/models/shared/deletefileresponse.ts @@ -5,9 +5,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; -/** - * OK - */ 
export class DeleteFileResponse extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "deleted" }) diff --git a/src/sdk/models/shared/deletemodelresponse.ts b/src/sdk/models/shared/deletemodelresponse.ts index 11986a9..a05dbc9 100755 --- a/src/sdk/models/shared/deletemodelresponse.ts +++ b/src/sdk/models/shared/deletemodelresponse.ts @@ -5,9 +5,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; -/** - * OK - */ export class DeleteModelResponse extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "deleted" }) diff --git a/src/sdk/models/shared/finetuningjob.ts b/src/sdk/models/shared/finetuningjob.ts index 971d222..8a743fd 100755 --- a/src/sdk/models/shared/finetuningjob.ts +++ b/src/sdk/models/shared/finetuningjob.ts @@ -3,7 +3,6 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { OpenAIFile } from "./openaifile"; import { Expose, Type } from "class-transformer"; /** @@ -46,14 +45,14 @@ export class FineTuningJob extends SpeakeasyBase { createdAt: number; /** - * The name of the fine-tuned model that is being created. + * The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. */ @SpeakeasyMetadata() @Expose({ name: "fine_tuned_model" }) fineTunedModel: string; /** - * The Unix timestamp (in seconds) for when the fine-tuning job was finished. + * The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. */ @SpeakeasyMetadata() @Expose({ name: "finished_at" }) @@ -98,10 +97,9 @@ export class FineTuningJob extends SpeakeasyBase { /** * The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). 
*/ - @SpeakeasyMetadata({ elemType: OpenAIFile }) + @SpeakeasyMetadata() @Expose({ name: "result_files" }) - @Type(() => OpenAIFile) - resultFiles: OpenAIFile[]; + resultFiles: string[]; /** * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. @@ -111,7 +109,7 @@ export class FineTuningJob extends SpeakeasyBase { status: string; /** - * The total number of billable tokens processed by this fine-tuning job. + * The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. */ @SpeakeasyMetadata() @Expose({ name: "trained_tokens" }) diff --git a/src/sdk/models/shared/imagesresponse.ts b/src/sdk/models/shared/imagesresponse.ts index 352015e..8378d01 100755 --- a/src/sdk/models/shared/imagesresponse.ts +++ b/src/sdk/models/shared/imagesresponse.ts @@ -6,9 +6,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Image } from "./image"; import { Expose, Type } from "class-transformer"; -/** - * OK - */ export class ImagesResponse extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "created" }) diff --git a/src/sdk/models/shared/listfilesresponse.ts b/src/sdk/models/shared/listfilesresponse.ts index cbbfddc..7e43646 100755 --- a/src/sdk/models/shared/listfilesresponse.ts +++ b/src/sdk/models/shared/listfilesresponse.ts @@ -6,9 +6,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { OpenAIFile } from "./openaifile"; import { Expose, Type } from "class-transformer"; -/** - * OK - */ export class ListFilesResponse extends SpeakeasyBase { @SpeakeasyMetadata({ elemType: OpenAIFile }) @Expose({ name: "data" }) diff --git a/src/sdk/models/shared/listfinetuneeventsresponse.ts b/src/sdk/models/shared/listfinetuneeventsresponse.ts index d670361..1eeebd8 100755 --- a/src/sdk/models/shared/listfinetuneeventsresponse.ts +++ 
b/src/sdk/models/shared/listfinetuneeventsresponse.ts @@ -6,9 +6,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { FineTuneEvent } from "./finetuneevent"; import { Expose, Type } from "class-transformer"; -/** - * OK - */ export class ListFineTuneEventsResponse extends SpeakeasyBase { @SpeakeasyMetadata({ elemType: FineTuneEvent }) @Expose({ name: "data" }) diff --git a/src/sdk/models/shared/listfinetunesresponse.ts b/src/sdk/models/shared/listfinetunesresponse.ts index 897f927..a9db242 100755 --- a/src/sdk/models/shared/listfinetunesresponse.ts +++ b/src/sdk/models/shared/listfinetunesresponse.ts @@ -6,9 +6,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { FineTune } from "./finetune"; import { Expose, Type } from "class-transformer"; -/** - * OK - */ export class ListFineTunesResponse extends SpeakeasyBase { @SpeakeasyMetadata({ elemType: FineTune }) @Expose({ name: "data" }) diff --git a/src/sdk/models/shared/listfinetuningjobeventsresponse.ts b/src/sdk/models/shared/listfinetuningjobeventsresponse.ts index 70be312..6af7121 100755 --- a/src/sdk/models/shared/listfinetuningjobeventsresponse.ts +++ b/src/sdk/models/shared/listfinetuningjobeventsresponse.ts @@ -6,9 +6,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { FineTuningJobEvent } from "./finetuningjobevent"; import { Expose, Type } from "class-transformer"; -/** - * OK - */ export class ListFineTuningJobEventsResponse extends SpeakeasyBase { @SpeakeasyMetadata({ elemType: FineTuningJobEvent }) @Expose({ name: "data" }) diff --git a/src/sdk/models/shared/listmodelsresponse.ts b/src/sdk/models/shared/listmodelsresponse.ts index 0fe5605..7da8846 100755 --- a/src/sdk/models/shared/listmodelsresponse.ts +++ b/src/sdk/models/shared/listmodelsresponse.ts @@ -6,9 +6,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Model } from "./model"; import { Expose, 
Type } from "class-transformer"; -/** - * OK - */ export class ListModelsResponse extends SpeakeasyBase { @SpeakeasyMetadata({ elemType: Model }) @Expose({ name: "data" }) diff --git a/src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts b/src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts index 4068e97..e54f335 100755 --- a/src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts +++ b/src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts @@ -6,9 +6,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { FineTuningJob } from "./finetuningjob"; import { Expose, Type } from "class-transformer"; -/** - * OK - */ export class ListPaginatedFineTuningJobsResponse extends SpeakeasyBase { @SpeakeasyMetadata({ elemType: FineTuningJob }) @Expose({ name: "data" }) diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 52b9936..fc80292 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -34,6 +34,10 @@ export type SDKProps = { * Allows overriding the default server URL used by the SDK */ serverURL?: string; + /** + * Allows overriding the default retry config used by the SDK + */ + retryConfig?: utils.RetryConfig; }; export class SDKConfiguration { @@ -43,9 +47,9 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.22.1"; - genVersion = "2.96.6"; - + sdkVersion = "2.22.2"; + genVersion = "2.107.0"; + retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); } @@ -75,6 +79,7 @@ export class Gpt { defaultClient: defaultClient, security: props?.security, serverURL: serverURL, + retryConfig: props?.retryConfig, }); this.openAI = new OpenAI(this.sdkConfiguration); From 72928695dbeb7e17bf445334f3ef9dcb1efa96b9 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 9 Sep 2023 00:52:53 +0000 Subject: [PATCH 49/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.82.3 --- RELEASES.md | 12 +++++++++++- 
gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/internal/utils/retries.ts | 4 ++-- src/sdk/sdk.ts | 5 +++-- 6 files changed, 23 insertions(+), 12 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index c273469..4e63888 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -560,4 +560,14 @@ Based on: ### Generated - [typescript v2.22.2] . ### Releases -- [NPM v2.22.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.2 - . \ No newline at end of file +- [NPM v2.22.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.2 - . + +## 2023-09-09 00:52:30 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.82.3 (2.107.3) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.22.3] . +### Releases +- [NPM v2.22.3] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.3 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 799656c..f4356db 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: 23fb01ce67ab1cc9ea6a631a0de21c44 docVersion: 2.0.0 - speakeasyVersion: 1.82.0 - generationVersion: 2.107.0 + speakeasyVersion: 1.82.3 + generationVersion: 2.107.3 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.87.0 + core: 2.87.1 deprecations: 2.81.1 globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.22.2 + version: 2.22.3 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index b30acd4..bdc8826 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.2", + "version": "2.22.3", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.22.2", + "version": "2.22.3", 
"dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index e37151d..e95d226 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.2", + "version": "2.22.3", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/internal/utils/retries.ts b/src/internal/utils/retries.ts index bc9d5e3..8fea21d 100755 --- a/src/internal/utils/retries.ts +++ b/src/internal/utils/retries.ts @@ -24,12 +24,12 @@ export class BackoffStrategy { } export class RetryConfig { - strategy: string; + strategy: "backoff" | "none"; backoff?: BackoffStrategy; retryConnectionErrors: boolean; constructor( - strategy: string, + strategy: "backoff" | "none", backoff?: BackoffStrategy, retryConnectionErrors = true ) { diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index fc80292..ef44419 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -2,6 +2,7 @@ * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
*/ +import * as utils from "../internal/utils"; import * as shared from "./models/shared"; import { OpenAI } from "./openai"; import axios from "axios"; @@ -47,8 +48,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.22.2"; - genVersion = "2.107.0"; + sdkVersion = "2.22.3"; + genVersion = "2.107.3"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From 2f96f0d39c23a991304fb0ca4a0491ef09f60599 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 13 Sep 2023 00:55:44 +0000 Subject: [PATCH 50/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.82.5 --- RELEASES.md | 12 ++++++- .../chatcompletionfunctioncalloption.md | 2 +- .../shared/createchatcompletionrequest.md | 2 +- ...reatechatcompletionrequestfunctioncall1.md | 2 +- docs/models/shared/createembeddingrequest.md | 10 +++--- .../shared/createfinetuningjobrequest.md | 2 +- docs/models/shared/finetuningjob.md | 1 + docs/models/shared/finetuningjoberror.md | 12 +++++++ files.gen | 1 + gen.yaml | 8 ++--- package-lock.json | 4 +-- package.json | 2 +- .../chatcompletionfunctioncalloption.ts | 2 +- .../shared/createchatcompletionrequest.ts | 4 +-- .../models/shared/createembeddingrequest.ts | 2 +- .../shared/createfinetuningjobrequest.ts | 2 +- src/sdk/models/shared/finetuningjob.ts | 34 +++++++++++++++++++ src/sdk/sdk.ts | 4 +-- 18 files changed, 82 insertions(+), 24 deletions(-) create mode 100755 docs/models/shared/finetuningjoberror.md diff --git a/RELEASES.md b/RELEASES.md index 4e63888..649448c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -570,4 +570,14 @@ Based on: ### Generated - [typescript v2.22.3] . ### Releases -- [NPM v2.22.3] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.3 - . \ No newline at end of file +- [NPM v2.22.3] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.3 - . 
+ +## 2023-09-13 00:55:20 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.82.5 (2.108.3) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.22.4] . +### Releases +- [NPM v2.22.4] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.4 - . \ No newline at end of file diff --git a/docs/models/shared/chatcompletionfunctioncalloption.md b/docs/models/shared/chatcompletionfunctioncalloption.md index 5ce7048..b4f28c6 100755 --- a/docs/models/shared/chatcompletionfunctioncalloption.md +++ b/docs/models/shared/chatcompletionfunctioncalloption.md @@ -1,6 +1,6 @@ # ChatCompletionFunctionCallOption -Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. +Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. 
## Fields diff --git a/docs/models/shared/createchatcompletionrequest.md b/docs/models/shared/createchatcompletionrequest.md index b616f9a..38ddc2d 100755 --- a/docs/models/shared/createchatcompletionrequest.md +++ b/docs/models/shared/createchatcompletionrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `frequencyPenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
| | -| `functionCall` | *any* | :heavy_minus_sign: | Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. | | +| `functionCall` | *any* | :heavy_minus_sign: | Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. | | | `functions` | [ChatCompletionFunctions](../../models/shared/chatcompletionfunctions.md)[] | :heavy_minus_sign: | A list of functions the model may generate JSON inputs for. | | | `logitBias` | Record | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
| | | `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the chat completion.

The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| | diff --git a/docs/models/shared/createchatcompletionrequestfunctioncall1.md b/docs/models/shared/createchatcompletionrequestfunctioncall1.md index 43e51c8..c6ee534 100755 --- a/docs/models/shared/createchatcompletionrequestfunctioncall1.md +++ b/docs/models/shared/createchatcompletionrequestfunctioncall1.md @@ -1,6 +1,6 @@ # CreateChatCompletionRequestFunctionCall1 -Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. +Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. 
## Values diff --git a/docs/models/shared/createembeddingrequest.md b/docs/models/shared/createembeddingrequest.md index 9de9fd3..d0110d9 100755 --- a/docs/models/shared/createembeddingrequest.md +++ b/docs/models/shared/createembeddingrequest.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `input` | *any* | :heavy_check_mark: | Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| | -| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| | -| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input` | *any* | :heavy_check_mark: | Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| | +| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createfinetuningjobrequest.md b/docs/models/shared/createfinetuningjobrequest.md index 85825b5..04bd699 100755 --- a/docs/models/shared/createfinetuningjobrequest.md +++ b/docs/models/shared/createfinetuningjobrequest.md @@ -7,6 +7,6 @@ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `hyperparameters` | [CreateFineTuningJobRequestHyperparameters](../../models/shared/createfinetuningjobrequesthyperparameters.md) | :heavy_minus_sign: | The hyperparameters used for the fine-tuning job. | | | `model` | *any* | :heavy_check_mark: | The name of the model to fine-tune. You can select one of the
[supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
| | -| `suffix` | *string* | :heavy_minus_sign: | A string of up to 40 characters that will be added to your fine-tuned model name.

For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
| | +| `suffix` | *string* | :heavy_minus_sign: | A string of up to 18 characters that will be added to your fine-tuned model name.

For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
| | | `trainingFile` | *string* | :heavy_check_mark: | The ID of an uploaded file that contains training data.

See [upload file](/docs/api-reference/files/upload) for how to upload a file.

Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
| file-abc123 | | `validationFile` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains validation data.

If you provide this file, the data is used to generate validation
metrics periodically during fine-tuning. These metrics can be viewed in
the fine-tuning results file.
The same data should not be present in both train and validation files.

Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
| file-abc123 | \ No newline at end of file diff --git a/docs/models/shared/finetuningjob.md b/docs/models/shared/finetuningjob.md index a7cb05d..d0e29e4 100755 --- a/docs/models/shared/finetuningjob.md +++ b/docs/models/shared/finetuningjob.md @@ -9,6 +9,7 @@ The `fine_tuning.job` object represents a fine-tuning job that has been created | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | | `createdAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was created. | +| `error` | [FineTuningJobError](../../models/shared/finetuningjoberror.md) | :heavy_check_mark: | For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. | | `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. | | `finishedAt` | *number* | :heavy_minus_sign: | The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. | | `hyperparameters` | [FineTuningJobHyperparameters](../../models/shared/finetuningjobhyperparameters.md) | :heavy_check_mark: | The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
| diff --git a/docs/models/shared/finetuningjoberror.md b/docs/models/shared/finetuningjoberror.md new file mode 100755 index 0000000..ca2efa3 --- /dev/null +++ b/docs/models/shared/finetuningjoberror.md @@ -0,0 +1,12 @@ +# FineTuningJobError + +For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | +| `code` | *string* | :heavy_minus_sign: | A machine-readable error code. | +| `message` | *string* | :heavy_minus_sign: | A human-readable error message. | +| `param` | *string* | :heavy_minus_sign: | The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. 
| \ No newline at end of file diff --git a/files.gen b/files.gen index 45dbfaa..afd2a6b 100755 --- a/files.gen +++ b/files.gen @@ -140,6 +140,7 @@ docs/models/shared/finetunehyperparams.md docs/models/shared/finetune.md docs/models/shared/openaifile.md docs/models/shared/finetuneevent.md +docs/models/shared/finetuningjoberror.md docs/models/shared/finetuningjobhyperparametersnepochs1.md docs/models/shared/finetuningjobhyperparameters.md docs/models/shared/finetuningjob.md diff --git a/gen.yaml b/gen.yaml index f4356db..6b104f3 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: 23fb01ce67ab1cc9ea6a631a0de21c44 + docChecksum: 4fc5f5c34468b8e129c95fc458c47f4b docVersion: 2.0.0 - speakeasyVersion: 1.82.3 - generationVersion: 2.107.3 + speakeasyVersion: 1.82.5 + generationVersion: 2.108.3 generation: sdkClassName: gpt sdkFlattening: true @@ -16,7 +16,7 @@ features: globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.22.3 + version: 2.22.4 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index bdc8826..6b05d86 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.3", + "version": "2.22.4", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.22.3", + "version": "2.22.4", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index e95d226..53f4d2a 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.3", + "version": "2.22.4", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/chatcompletionfunctioncalloption.ts b/src/sdk/models/shared/chatcompletionfunctioncalloption.ts index 2d533aa..c7694b0 100755 --- 
a/src/sdk/models/shared/chatcompletionfunctioncalloption.ts +++ b/src/sdk/models/shared/chatcompletionfunctioncalloption.ts @@ -6,7 +6,7 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; /** - * Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + * Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. */ export class ChatCompletionFunctionCallOption extends SpeakeasyBase { /** diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts index 7430b81..fc4f3bb 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -8,7 +8,7 @@ import { ChatCompletionRequestMessage } from "./chatcompletionrequestmessage"; import { Expose, Type } from "class-transformer"; /** - * Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. 
+ * Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. */ export enum CreateChatCompletionRequestFunctionCall1 { None = "none", @@ -46,7 +46,7 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { frequencyPenalty?: number; /** - * Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name":\ "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + * Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. */ @SpeakeasyMetadata() @Expose({ name: "function_call" }) diff --git a/src/sdk/models/shared/createembeddingrequest.ts b/src/sdk/models/shared/createembeddingrequest.ts index 54c0069..51b00fe 100755 --- a/src/sdk/models/shared/createembeddingrequest.ts +++ b/src/sdk/models/shared/createembeddingrequest.ts @@ -17,7 +17,7 @@ export enum CreateEmbeddingRequestModel2 { export class CreateEmbeddingRequest extends SpeakeasyBase { /** - * Input text to embed, encoded as a string or array of tokens. 
To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. + * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. * * @remarks * diff --git a/src/sdk/models/shared/createfinetuningjobrequest.ts b/src/sdk/models/shared/createfinetuningjobrequest.ts index ab4501a..7e49f8b 100755 --- a/src/sdk/models/shared/createfinetuningjobrequest.ts +++ b/src/sdk/models/shared/createfinetuningjobrequest.ts @@ -66,7 +66,7 @@ export class CreateFineTuningJobRequest extends SpeakeasyBase { model: any; /** - * A string of up to 40 characters that will be added to your fine-tuned model name. + * A string of up to 18 characters that will be added to your fine-tuned model name. * * @remarks * diff --git a/src/sdk/models/shared/finetuningjob.ts b/src/sdk/models/shared/finetuningjob.ts index 8a743fd..1339f8f 100755 --- a/src/sdk/models/shared/finetuningjob.ts +++ b/src/sdk/models/shared/finetuningjob.ts @@ -5,6 +5,32 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; +/** + * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + */ +export class FineTuningJobError extends SpeakeasyBase { + /** + * A machine-readable error code. 
+ */ + @SpeakeasyMetadata() + @Expose({ name: "code" }) + code?: string; + + /** + * A human-readable error message. + */ + @SpeakeasyMetadata() + @Expose({ name: "message" }) + message?: string; + + /** + * The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + */ + @SpeakeasyMetadata() + @Expose({ name: "param" }) + param?: string; +} + /** * The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. * @@ -44,6 +70,14 @@ export class FineTuningJob extends SpeakeasyBase { @Expose({ name: "created_at" }) createdAt: number; + /** + * For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + */ + @SpeakeasyMetadata() + @Expose({ name: "error" }) + @Type(() => FineTuningJobError) + error: FineTuningJobError; + /** * The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. 
*/ diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index ef44419..1bc01cd 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,8 +48,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.22.3"; - genVersion = "2.107.3"; + sdkVersion = "2.22.4"; + genVersion = "2.108.3"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From 98c3fb0c9b013516865bd901d8faa0b7c6efa37b Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 16 Sep 2023 00:53:31 +0000 Subject: [PATCH 51/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.86.0 --- README.md | 2 +- RELEASES.md | 12 +++++++++++- docs/sdks/openai/README.md | 2 +- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 7 files changed, 22 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 46a24f5..e42a686 100755 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ sdk.openAI.cancelFineTune({ ## Available Resources and Operations -### [openAI](docs/sdks/openai/README.md) +### [OpenAI](docs/sdks/openai/README.md) * [~~cancelFineTune~~](docs/sdks/openai/README.md#cancelfinetune) - Immediately cancel a fine-tune job. :warning: **Deprecated** diff --git a/RELEASES.md b/RELEASES.md index 649448c..462a585 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -580,4 +580,14 @@ Based on: ### Generated - [typescript v2.22.4] . ### Releases -- [NPM v2.22.4] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.4 - . \ No newline at end of file +- [NPM v2.22.4] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.4 - . + +## 2023-09-16 00:53:07 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.86.0 (2.115.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.22.5] . 
+### Releases +- [NPM v2.22.5] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.5 - . \ No newline at end of file diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index a4b1e81..bda0dea 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -1,4 +1,4 @@ -# openAI +# OpenAI ## Overview diff --git a/gen.yaml b/gen.yaml index 6b104f3..b27a687 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: 4fc5f5c34468b8e129c95fc458c47f4b docVersion: 2.0.0 - speakeasyVersion: 1.82.5 - generationVersion: 2.108.3 + speakeasyVersion: 1.86.0 + generationVersion: 2.115.2 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.87.1 + core: 2.87.2 deprecations: 2.81.1 globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.22.4 + version: 2.22.5 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 6b05d86..901f912 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.4", + "version": "2.22.5", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.22.4", + "version": "2.22.5", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 53f4d2a..f860b86 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.4", + "version": "2.22.5", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 1bc01cd..6049cee 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,8 +48,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.22.4"; - genVersion 
= "2.108.3"; + sdkVersion = "2.22.5"; + genVersion = "2.115.2"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From 303489f85bae0648b0a2c56a72a1b5650120e855 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 20 Sep 2023 00:55:02 +0000 Subject: [PATCH 52/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.88.0 --- RELEASES.md | 12 +++- .../createchatcompletionresponsechoices.md | 10 +-- ...atcompletionresponsechoicesfinishreason.md | 15 +++-- .../shared/createcompletionresponsechoices.md | 12 ++-- ...tecompletionresponsechoicesfinishreason.md | 12 ++-- .../shared/createeditresponsechoices.md | 10 +-- .../createeditresponsechoicesfinishreason.md | 3 +- gen.yaml | 10 +-- package-lock.json | 4 +- package.json | 2 +- .../shared/createchatcompletionresponse.ts | 9 ++- .../models/shared/createcompletionresponse.ts | 7 +- src/sdk/models/shared/createeditresponse.ts | 6 +- src/sdk/openai.ts | 65 ++++++++----------- src/sdk/sdk.ts | 4 +- 15 files changed, 97 insertions(+), 84 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 462a585..7d1809f 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -590,4 +590,14 @@ Based on: ### Generated - [typescript v2.22.5] . ### Releases -- [NPM v2.22.5] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.5 - . \ No newline at end of file +- [NPM v2.22.5] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.5 - . + +## 2023-09-20 00:54:36 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.88.0 (2.118.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.22.6] . +### Releases +- [NPM v2.22.6] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.6 - . 
\ No newline at end of file diff --git a/docs/models/shared/createchatcompletionresponsechoices.md b/docs/models/shared/createchatcompletionresponsechoices.md index 0400387..53e6d31 100755 --- a/docs/models/shared/createchatcompletionresponsechoices.md +++ b/docs/models/shared/createchatcompletionresponsechoices.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `finishReason` | [CreateChatCompletionResponseChoicesFinishReason](../../models/shared/createchatcompletionresponsechoicesfinishreason.md) | :heavy_check_mark: | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
`length` if the maximum number of tokens specified in the request was reached, or `function_call` if the model called a function.
| -| `index` | *number* | :heavy_check_mark: | The index of the choice in the list of choices. | -| `message` | [ChatCompletionResponseMessage](../../models/shared/chatcompletionresponsemessage.md) | :heavy_check_mark: | A chat completion message generated by the model. | \ No newline at end of file +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `finishReason` | [CreateChatCompletionResponseChoicesFinishReason](../../models/shared/createchatcompletionresponsechoicesfinishreason.md) | :heavy_check_mark: | The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence,
`length` if the maximum number of tokens specified in the request was reached,
`content_filter` if content was omitted due to a flag from our content filters,
or `function_call` if the model called a function.
| +| `index` | *number* | :heavy_check_mark: | The index of the choice in the list of choices. | +| `message` | [ChatCompletionResponseMessage](../../models/shared/chatcompletionresponsemessage.md) | :heavy_check_mark: | A chat completion message generated by the model. | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md b/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md index eafea0f..4933ac7 100755 --- a/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md +++ b/docs/models/shared/createchatcompletionresponsechoicesfinishreason.md @@ -1,14 +1,17 @@ # CreateChatCompletionResponseChoicesFinishReason The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, -`length` if the maximum number of tokens specified in the request was reached, or `function_call` if the model called a function. +`length` if the maximum number of tokens specified in the request was reached, +`content_filter` if content was omitted due to a flag from our content filters, +or `function_call` if the model called a function. 
## Values -| Name | Value | -| -------------- | -------------- | -| `Stop` | stop | -| `Length` | length | -| `FunctionCall` | function_call | \ No newline at end of file +| Name | Value | +| --------------- | --------------- | +| `Stop` | stop | +| `Length` | length | +| `FunctionCall` | function_call | +| `ContentFilter` | content_filter | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoices.md b/docs/models/shared/createcompletionresponsechoices.md index 2445ca8..ea26ded 100755 --- a/docs/models/shared/createcompletionresponsechoices.md +++ b/docs/models/shared/createcompletionresponsechoices.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `finishReason` | [CreateCompletionResponseChoicesFinishReason](../../models/shared/createcompletionresponsechoicesfinishreason.md) | :heavy_check_mark: | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
or `length` if the maximum number of tokens specified in the request was reached.
| -| `index` | *number* | :heavy_check_mark: | N/A | -| `logprobs` | [CreateCompletionResponseChoicesLogprobs](../../models/shared/createcompletionresponsechoiceslogprobs.md) | :heavy_check_mark: | N/A | -| `text` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `finishReason` | [CreateCompletionResponseChoicesFinishReason](../../models/shared/createcompletionresponsechoicesfinishreason.md) | :heavy_check_mark: | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
`length` if the maximum number of tokens specified in the request was reached,
or `content_filter` if content was omitted due to a flag from our content filters.
| +| `index` | *number* | :heavy_check_mark: | N/A | +| `logprobs` | [CreateCompletionResponseChoicesLogprobs](../../models/shared/createcompletionresponsechoiceslogprobs.md) | :heavy_check_mark: | N/A | +| `text` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createcompletionresponsechoicesfinishreason.md b/docs/models/shared/createcompletionresponsechoicesfinishreason.md index 34b66fe..a275a51 100755 --- a/docs/models/shared/createcompletionresponsechoicesfinishreason.md +++ b/docs/models/shared/createcompletionresponsechoicesfinishreason.md @@ -1,13 +1,15 @@ # CreateCompletionResponseChoicesFinishReason The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, -or `length` if the maximum number of tokens specified in the request was reached. +`length` if the maximum number of tokens specified in the request was reached, +or `content_filter` if content was omitted due to a flag from our content filters. 
## Values -| Name | Value | -| -------- | -------- | -| `Stop` | stop | -| `Length` | length | \ No newline at end of file +| Name | Value | +| --------------- | --------------- | +| `Stop` | stop | +| `Length` | length | +| `ContentFilter` | content_filter | \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoices.md b/docs/models/shared/createeditresponsechoices.md index 8c90e20..5a6a696 100755 --- a/docs/models/shared/createeditresponsechoices.md +++ b/docs/models/shared/createeditresponsechoices.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `finishReason` | [CreateEditResponseChoicesFinishReason](../../models/shared/createeditresponsechoicesfinishreason.md) | :heavy_check_mark: | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
or `length` if the maximum number of tokens specified in the request was reached.
| -| `index` | *number* | :heavy_check_mark: | The index of the choice in the list of choices. | -| `text` | *string* | :heavy_check_mark: | The edited result. | \ No newline at end of file +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `finishReason` | [CreateEditResponseChoicesFinishReason](../../models/shared/createeditresponsechoicesfinishreason.md) | :heavy_check_mark: | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
`length` if the maximum number of tokens specified in the request was reached,
or `content_filter` if content was omitted due to a flag from our content filters.
| +| `index` | *number* | :heavy_check_mark: | The index of the choice in the list of choices. | +| `text` | *string* | :heavy_check_mark: | The edited result. | \ No newline at end of file diff --git a/docs/models/shared/createeditresponsechoicesfinishreason.md b/docs/models/shared/createeditresponsechoicesfinishreason.md index 9257539..88c981b 100755 --- a/docs/models/shared/createeditresponsechoicesfinishreason.md +++ b/docs/models/shared/createeditresponsechoicesfinishreason.md @@ -1,7 +1,8 @@ # CreateEditResponseChoicesFinishReason The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, -or `length` if the maximum number of tokens specified in the request was reached. +`length` if the maximum number of tokens specified in the request was reached, +or `content_filter` if content was omitted due to a flag from our content filters. diff --git a/gen.yaml b/gen.yaml index b27a687..08350e2 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: 4fc5f5c34468b8e129c95fc458c47f4b + docChecksum: 6b6973cd0a05cc2c2d531357a1c0c034 docVersion: 2.0.0 - speakeasyVersion: 1.86.0 - generationVersion: 2.115.2 + speakeasyVersion: 1.88.0 + generationVersion: 2.118.1 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.87.2 + core: 2.87.3 deprecations: 2.81.1 globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.22.5 + version: 2.22.6 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 901f912..868a8a3 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.5", + "version": "2.22.6", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.22.5", + 
"version": "2.22.6", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index f860b86..62cc340 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.5", + "version": "2.22.6", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/createchatcompletionresponse.ts b/src/sdk/models/shared/createchatcompletionresponse.ts index 41f0aa9..7157336 100755 --- a/src/sdk/models/shared/createchatcompletionresponse.ts +++ b/src/sdk/models/shared/createchatcompletionresponse.ts @@ -11,13 +11,16 @@ import { Expose, Type } from "class-transformer"; * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, * * @remarks - * `length` if the maximum number of tokens specified in the request was reached, or `function_call` if the model called a function. + * `length` if the maximum number of tokens specified in the request was reached, + * `content_filter` if content was omitted due to a flag from our content filters, + * or `function_call` if the model called a function. * */ export enum CreateChatCompletionResponseChoicesFinishReason { Stop = "stop", Length = "length", FunctionCall = "function_call", + ContentFilter = "content_filter", } export class CreateChatCompletionResponseChoices extends SpeakeasyBase { @@ -25,7 +28,9 @@ export class CreateChatCompletionResponseChoices extends SpeakeasyBase { * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, * * @remarks - * `length` if the maximum number of tokens specified in the request was reached, or `function_call` if the model called a function. 
+ * `length` if the maximum number of tokens specified in the request was reached, + * `content_filter` if content was omitted due to a flag from our content filters, + * or `function_call` if the model called a function. * */ @SpeakeasyMetadata() diff --git a/src/sdk/models/shared/createcompletionresponse.ts b/src/sdk/models/shared/createcompletionresponse.ts index 5e7e50f..efedade 100755 --- a/src/sdk/models/shared/createcompletionresponse.ts +++ b/src/sdk/models/shared/createcompletionresponse.ts @@ -10,12 +10,14 @@ import { Expose, Type } from "class-transformer"; * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, * * @remarks - * or `length` if the maximum number of tokens specified in the request was reached. + * `length` if the maximum number of tokens specified in the request was reached, + * or `content_filter` if content was omitted due to a flag from our content filters. * */ export enum CreateCompletionResponseChoicesFinishReason { Stop = "stop", Length = "length", + ContentFilter = "content_filter", } export class CreateCompletionResponseChoicesLogprobs extends SpeakeasyBase { @@ -41,7 +43,8 @@ export class CreateCompletionResponseChoices extends SpeakeasyBase { * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, * * @remarks - * or `length` if the maximum number of tokens specified in the request was reached. + * `length` if the maximum number of tokens specified in the request was reached, + * or `content_filter` if content was omitted due to a flag from our content filters. 
* */ @SpeakeasyMetadata() diff --git a/src/sdk/models/shared/createeditresponse.ts b/src/sdk/models/shared/createeditresponse.ts index 0b38720..c09844b 100755 --- a/src/sdk/models/shared/createeditresponse.ts +++ b/src/sdk/models/shared/createeditresponse.ts @@ -10,7 +10,8 @@ import { Expose, Type } from "class-transformer"; * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, * * @remarks - * or `length` if the maximum number of tokens specified in the request was reached. + * `length` if the maximum number of tokens specified in the request was reached, + * or `content_filter` if content was omitted due to a flag from our content filters. * */ export enum CreateEditResponseChoicesFinishReason { @@ -23,7 +24,8 @@ export class CreateEditResponseChoices extends SpeakeasyBase { * The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, * * @remarks - * or `length` if the maximum number of tokens specified in the request was reached. + * `length` if the maximum number of tokens specified in the request was reached, + * or `content_filter` if content was omitted due to a flag from our content filters. 
* */ @SpeakeasyMetadata() diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index 0c0c9ee..534eec5 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -191,7 +191,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/chat/completions"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); @@ -210,8 +210,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ @@ -279,7 +278,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/completions"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); @@ -298,8 +297,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ @@ -368,7 +366,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/edits"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); @@ -387,8 +385,7 @@ export class OpenAI { } const properties = 
utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ @@ -455,7 +452,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/embeddings"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); @@ -474,8 +471,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ @@ -543,7 +539,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/files"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); @@ -562,8 +558,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ @@ -634,7 +629,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/fine-tunes"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let 
[reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); @@ -653,8 +648,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ @@ -723,7 +717,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/fine_tuning/jobs"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); @@ -742,8 +736,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ @@ -811,7 +804,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/images/generations"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); @@ -830,8 +823,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); 
headers["Accept"] = "application/json"; headers[ @@ -898,7 +890,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/images/edits"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); @@ -917,8 +909,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ @@ -985,7 +976,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/images/variations"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); @@ -1004,8 +995,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ @@ -1073,7 +1063,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/moderations"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); @@ -1092,8 +1082,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, 
...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ @@ -1160,7 +1149,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/audio/transcriptions"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); @@ -1179,8 +1168,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ @@ -1248,7 +1236,7 @@ export class OpenAI { ); const url: string = baseURL.replace(/\/$/, "") + "/audio/translations"; - let [reqBodyHeaders, reqBody]: [object, any] = [{}, {}]; + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; try { [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); @@ -1267,8 +1255,7 @@ export class OpenAI { } const properties = utils.parseSecurityProperties(globalSecurity); const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; - if (reqBody == null || Object.keys(reqBody).length === 0) - throw new Error("request body is required"); + if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; headers[ diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 6049cee..71215e6 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,8 +48,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = 
"2.0.0"; - sdkVersion = "2.22.5"; - genVersion = "2.115.2"; + sdkVersion = "2.22.6"; + genVersion = "2.118.1"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From b136844baea2ba39cf3867e891534d2c90e0065e Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 21 Sep 2023 00:54:40 +0000 Subject: [PATCH 53/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.88.1 --- RELEASES.md | 12 ++++- gen.yaml | 8 ++-- package-lock.json | 4 +- package.json | 2 +- src/sdk/openai.ts | 110 ++++++++++++++++++++++++++++++++++------------ src/sdk/sdk.ts | 4 +- 6 files changed, 101 insertions(+), 39 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 7d1809f..be3d027 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -600,4 +600,14 @@ Based on: ### Generated - [typescript v2.22.6] . ### Releases -- [NPM v2.22.6] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.6 - . \ No newline at end of file +- [NPM v2.22.6] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.6 - . + +## 2023-09-21 00:54:16 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.88.1 (2.122.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.22.7] . +### Releases +- [NPM v2.22.7] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.7 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 08350e2..aee87d0 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: 6b6973cd0a05cc2c2d531357a1c0c034 docVersion: 2.0.0 - speakeasyVersion: 1.88.0 - generationVersion: 2.118.1 + speakeasyVersion: 1.88.1 + generationVersion: 2.122.1 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.87.3 + core: 2.87.4 deprecations: 2.81.1 globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.22.6 + version: 2.22.7 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 868a8a3..550e037 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.6", + "version": "2.22.7", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.22.6", + "version": "2.22.7", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 62cc340..103fcfa 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.6", + "version": "2.22.7", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index 534eec5..ff92665 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -7,7 +7,7 @@ import * as errors from "./models/errors"; import * as operations from "./models/operations"; import * as shared from "./models/shared"; import { SDKConfiguration } from "./sdk"; -import { AxiosInstance, AxiosRequestConfig, AxiosResponse } from "axios"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; /** * The OpenAI REST API @@ -48,7 +48,7 @@ export class OpenAI { globalSecurity = new 
shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -124,7 +124,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -209,7 +209,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -296,7 +300,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -384,7 +392,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == 
null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -470,7 +482,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -557,7 +573,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -647,7 +667,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -735,7 +759,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -822,7 +850,11 @@ 
export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -908,7 +940,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -994,7 +1030,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -1081,7 +1121,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -1167,7 +1211,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = 
utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -1254,7 +1302,11 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...reqBodyHeaders, ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; @@ -1330,7 +1382,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1404,7 +1456,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1478,7 +1530,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1542,7 +1594,7 @@ export class OpenAI { globalSecurity = new 
shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1619,7 +1671,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; @@ -1691,7 +1743,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1770,7 +1822,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; @@ -1839,7 +1891,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -1914,7 +1966,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const 
properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; @@ -1990,7 +2042,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -2066,7 +2118,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -2144,7 +2196,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ @@ -2219,7 +2271,7 @@ export class OpenAI { globalSecurity = new shared.Security(globalSecurity); } const properties = utils.parseSecurityProperties(globalSecurity); - const headers = { ...config?.headers, ...properties.headers }; + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; headers[ diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 71215e6..725d88c 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,8 +48,8 @@ export class SDKConfiguration { serverDefaults: any; language = 
"typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.22.6"; - genVersion = "2.118.1"; + sdkVersion = "2.22.7"; + genVersion = "2.122.1"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From a90b1d60e52acca47a1e87ba6f6a799c7d79b5a8 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 22 Sep 2023 00:55:08 +0000 Subject: [PATCH 54/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.89.0 --- README.md | 28 +++++++++++++++++-- RELEASES.md | 12 +++++++- .../operations/cancelfinetuneresponse.md | 12 ++++---- .../operations/cancelfinetuningjobresponse.md | 2 +- .../createchatcompletionresponse.md | 2 +- .../operations/createcompletionresponse.md | 2 +- docs/models/operations/createeditresponse.md | 2 +- .../operations/createembeddingresponse.md | 2 +- docs/models/operations/createfileresponse.md | 12 ++++---- .../operations/createfinetuneresponse.md | 12 ++++---- .../operations/createfinetuningjobresponse.md | 2 +- .../operations/createimageeditresponse.md | 2 +- docs/models/operations/createimageresponse.md | 2 +- .../createimagevariationresponse.md | 2 +- .../operations/createmoderationresponse.md | 2 +- .../operations/createtranscriptionresponse.md | 2 +- .../operations/createtranslationresponse.md | 2 +- docs/models/operations/deletefileresponse.md | 2 +- docs/models/operations/deletemodelresponse.md | 2 +- .../models/operations/downloadfileresponse.md | 12 ++++---- docs/models/operations/listfilesresponse.md | 2 +- .../operations/listfinetuneeventsresponse.md | 2 +- .../operations/listfinetunesresponse.md | 2 +- .../listfinetuningeventsresponse.md | 2 +- docs/models/operations/listmodelsresponse.md | 2 +- .../listpaginatedfinetuningjobsresponse.md | 2 +- .../models/operations/retrievefileresponse.md | 12 ++++---- .../operations/retrievefinetuneresponse.md | 12 ++++---- .../retrievefinetuningjobresponse.md | 2 +- .../operations/retrievemodelresponse.md | 12 ++++---- 
.../shared/createcompletionrequestmodel2.md | 23 +++++++-------- docs/models/shared/finetuningjob.md | 4 +-- docs/models/shared/finetuningjoberror.md | 6 ++-- .../shared/finetuningjobhyperparameters.md | 2 +- .../finetuningjobhyperparametersnepochs1.md | 2 +- files.gen | 4 +-- gen.yaml | 10 +++---- package-lock.json | 4 +-- package.json | 2 +- .../models/shared/createcompletionrequest.ts | 1 + src/sdk/models/shared/finetuningjob.ts | 16 +++++------ src/sdk/sdk.ts | 4 +-- 42 files changed, 140 insertions(+), 104 deletions(-) diff --git a/README.md b/README.md index e42a686..0068d0f 100755 --- a/README.md +++ b/README.md @@ -39,8 +39,6 @@ Authorization: Bearer YOUR_API_KEY ## SDK Example Usage - - ```typescript import { Gpt } from "@speakeasy-api/openai"; import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; @@ -120,4 +118,30 @@ Response includes details of the enqueued job including job status and the name * [retrieveModel](docs/sdks/openai/README.md#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + + + + + + + + + + + +# Pagination + +Some of the endpoints in this SDK support pagination. To use pagination, you make your SDK calls as usual, but the +returned response object will have a `next` method that can be called to pull down the next group of results. If the +return value of `next` is `null`, then there are no more pages to be fetched. + +Here's an example of one such pagination call: + + + + + + + + ### SDK Generated by [Speakeasy](https://docs.speakeasyapi.dev/docs/using-speakeasy/client-sdks) diff --git a/RELEASES.md b/RELEASES.md index be3d027..18495be 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -610,4 +610,14 @@ Based on: ### Generated - [typescript v2.22.7] . ### Releases -- [NPM v2.22.7] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.7 - . 
\ No newline at end of file +- [NPM v2.22.7] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.7 - . + +## 2023-09-22 00:54:44 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.89.0 (2.125.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.22.8] . +### Releases +- [NPM v2.22.8] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.8 - . \ No newline at end of file diff --git a/docs/models/operations/cancelfinetuneresponse.md b/docs/models/operations/cancelfinetuneresponse.md index 72af593..58e5848 100755 --- a/docs/models/operations/cancelfinetuneresponse.md +++ b/docs/models/operations/cancelfinetuneresponse.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No 
newline at end of file diff --git a/docs/models/operations/cancelfinetuningjobresponse.md b/docs/models/operations/cancelfinetuningjobresponse.md index 45c34c3..0db8fae 100755 --- a/docs/models/operations/cancelfinetuningjobresponse.md +++ b/docs/models/operations/cancelfinetuningjobresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `fineTuningJob` | [shared.FineTuningJob](../../models/shared/finetuningjob.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createchatcompletionresponse.md b/docs/models/operations/createchatcompletionresponse.md index abd2419..fc1b1f5 100755 --- a/docs/models/operations/createchatcompletionresponse.md +++ b/docs/models/operations/createchatcompletionresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `createChatCompletionResponse` | [shared.CreateChatCompletionResponse](../../models/shared/createchatcompletionresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createcompletionresponse.md b/docs/models/operations/createcompletionresponse.md index 2bcda50..299c999 100755 --- a/docs/models/operations/createcompletionresponse.md +++ b/docs/models/operations/createcompletionresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `createCompletionResponse` | 
[shared.CreateCompletionResponse](../../models/shared/createcompletionresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createeditresponse.md b/docs/models/operations/createeditresponse.md index 38f3429..daf8445 100755 --- a/docs/models/operations/createeditresponse.md +++ b/docs/models/operations/createeditresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `createEditResponse` | [shared.CreateEditResponse](../../models/shared/createeditresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createembeddingresponse.md b/docs/models/operations/createembeddingresponse.md index bae0066..e89e528 100755 --- a/docs/models/operations/createembeddingresponse.md +++ b/docs/models/operations/createembeddingresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `createEmbeddingResponse` | [shared.CreateEmbeddingResponse](../../models/shared/createembeddingresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/operations/createfileresponse.md b/docs/models/operations/createfileresponse.md index 4be2e54..2d5ba03 100755 --- a/docs/models/operations/createfileresponse.md +++ b/docs/models/operations/createfileresponse.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `openAIFile` | [shared.OpenAIFile](../../models/shared/openaifile.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `openAIFile` | [shared.OpenAIFile](../../models/shared/openaifile.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createfinetuneresponse.md b/docs/models/operations/createfinetuneresponse.md index 385bf3f..d6568d5 100755 --- a/docs/models/operations/createfinetuneresponse.md +++ b/docs/models/operations/createfinetuneresponse.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | 
-------------------------------------------------------- | -------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createfinetuningjobresponse.md b/docs/models/operations/createfinetuningjobresponse.md index b191147..2d0eabe 100755 --- a/docs/models/operations/createfinetuningjobresponse.md +++ b/docs/models/operations/createfinetuningjobresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `fineTuningJob` | [shared.FineTuningJob](../../models/shared/finetuningjob.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createimageeditresponse.md b/docs/models/operations/createimageeditresponse.md index 8c7a6f8..222a066 100755 --- 
a/docs/models/operations/createimageeditresponse.md +++ b/docs/models/operations/createimageeditresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `imagesResponse` | [shared.ImagesResponse](../../models/shared/imagesresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createimageresponse.md b/docs/models/operations/createimageresponse.md index 2abaad8..2c109e9 100755 --- a/docs/models/operations/createimageresponse.md +++ b/docs/models/operations/createimageresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `imagesResponse` | [shared.ImagesResponse](../../models/shared/imagesresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createimagevariationresponse.md b/docs/models/operations/createimagevariationresponse.md index f5d958e..0307361 100755 --- a/docs/models/operations/createimagevariationresponse.md +++ b/docs/models/operations/createimagevariationresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `imagesResponse` | [shared.ImagesResponse](../../models/shared/imagesresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end 
of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createmoderationresponse.md b/docs/models/operations/createmoderationresponse.md index d04c6fb..5a6beaa 100755 --- a/docs/models/operations/createmoderationresponse.md +++ b/docs/models/operations/createmoderationresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `createModerationResponse` | [shared.CreateModerationResponse](../../models/shared/createmoderationresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createtranscriptionresponse.md b/docs/models/operations/createtranscriptionresponse.md index 99c673c..52f06ec 100755 --- a/docs/models/operations/createtranscriptionresponse.md +++ b/docs/models/operations/createtranscriptionresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `createTranscriptionResponse` | [shared.CreateTranscriptionResponse](../../models/shared/createtranscriptionresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createtranslationresponse.md b/docs/models/operations/createtranslationresponse.md index 2818abd..5d836aa 100755 --- a/docs/models/operations/createtranslationresponse.md +++ 
b/docs/models/operations/createtranslationresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `createTranslationResponse` | [shared.CreateTranslationResponse](../../models/shared/createtranslationresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/deletefileresponse.md b/docs/models/operations/deletefileresponse.md index 2141bfd..51e9dfb 100755 --- a/docs/models/operations/deletefileresponse.md +++ b/docs/models/operations/deletefileresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `deleteFileResponse` | [shared.DeleteFileResponse](../../models/shared/deletefileresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/deletemodelresponse.md b/docs/models/operations/deletemodelresponse.md index 53e22d2..89fc25d 100755 --- a/docs/models/operations/deletemodelresponse.md +++ b/docs/models/operations/deletemodelresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `deleteModelResponse` | [shared.DeleteModelResponse](../../models/shared/deletemodelresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | 
[AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/downloadfileresponse.md b/docs/models/operations/downloadfileresponse.md index ecf092c..1daab0b 100755 --- a/docs/models/operations/downloadfileresponse.md +++ b/docs/models/operations/downloadfileresponse.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | -| `downloadFile200ApplicationJSONString` | *string* | :heavy_minus_sign: | OK | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | +| `downloadFile200ApplicationJSONString` | *string* | :heavy_minus_sign: | OK | \ No newline at end of file diff --git a/docs/models/operations/listfilesresponse.md b/docs/models/operations/listfilesresponse.md index 5807562..09bca30 100755 --- a/docs/models/operations/listfilesresponse.md +++ b/docs/models/operations/listfilesresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `listFilesResponse` | [shared.ListFilesResponse](../../models/shared/listfilesresponse.md) | 
:heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/listfinetuneeventsresponse.md b/docs/models/operations/listfinetuneeventsresponse.md index f291b8d..74fe71c 100755 --- a/docs/models/operations/listfinetuneeventsresponse.md +++ b/docs/models/operations/listfinetuneeventsresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `listFineTuneEventsResponse` | [shared.ListFineTuneEventsResponse](../../models/shared/listfinetuneeventsresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/listfinetunesresponse.md b/docs/models/operations/listfinetunesresponse.md index 5351fd6..03aea90 100755 --- a/docs/models/operations/listfinetunesresponse.md +++ b/docs/models/operations/listfinetunesresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `listFineTunesResponse` | [shared.ListFineTunesResponse](../../models/shared/listfinetunesresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/listfinetuningeventsresponse.md 
b/docs/models/operations/listfinetuningeventsresponse.md index 3407008..3cd168d 100755 --- a/docs/models/operations/listfinetuningeventsresponse.md +++ b/docs/models/operations/listfinetuningeventsresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `listFineTuningJobEventsResponse` | [shared.ListFineTuningJobEventsResponse](../../models/shared/listfinetuningjobeventsresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/listmodelsresponse.md b/docs/models/operations/listmodelsresponse.md index 2bd95ce..9db7c75 100755 --- a/docs/models/operations/listmodelsresponse.md +++ b/docs/models/operations/listmodelsresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `listModelsResponse` | [shared.ListModelsResponse](../../models/shared/listmodelsresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/listpaginatedfinetuningjobsresponse.md b/docs/models/operations/listpaginatedfinetuningjobsresponse.md index ea1f598..9a0cf8c 100755 --- a/docs/models/operations/listpaginatedfinetuningjobsresponse.md +++ b/docs/models/operations/listpaginatedfinetuningjobsresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `listPaginatedFineTuningJobsResponse` | 
[shared.ListPaginatedFineTuningJobsResponse](../../models/shared/listpaginatedfinetuningjobsresponse.md) | :heavy_minus_sign: | OK | | `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/retrievefileresponse.md b/docs/models/operations/retrievefileresponse.md index c8496ef..615027c 100755 --- a/docs/models/operations/retrievefileresponse.md +++ b/docs/models/operations/retrievefileresponse.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `openAIFile` | [shared.OpenAIFile](../../models/shared/openaifile.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `openAIFile` | [shared.OpenAIFile](../../models/shared/openaifile.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/operations/retrievefinetuneresponse.md b/docs/models/operations/retrievefinetuneresponse.md index 7824495..c6ecb79 100755 --- a/docs/models/operations/retrievefinetuneresponse.md +++ b/docs/models/operations/retrievefinetuneresponse.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/retrievefinetuningjobresponse.md b/docs/models/operations/retrievefinetuningjobresponse.md index df145b5..ec10122 100755 --- a/docs/models/operations/retrievefinetuningjobresponse.md +++ b/docs/models/operations/retrievefinetuningjobresponse.md @@ -8,4 +8,4 @@ | `contentType` | *string* | :heavy_check_mark: | N/A | | `fineTuningJob` | [shared.FineTuningJob](../../models/shared/finetuningjob.md) | :heavy_minus_sign: | OK | | `statusCode` | 
*number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/retrievemodelresponse.md b/docs/models/operations/retrievemodelresponse.md index 5eedb5f..1bb3516 100755 --- a/docs/models/operations/retrievemodelresponse.md +++ b/docs/models/operations/retrievemodelresponse.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `model` | [shared.Model](../../models/shared/model.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse>](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `contentType` | *string* | :heavy_check_mark: | N/A | +| `model` | [shared.Model](../../models/shared/model.md) | :heavy_minus_sign: | OK | +| `statusCode` | *number* | :heavy_check_mark: | N/A | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createcompletionrequestmodel2.md b/docs/models/shared/createcompletionrequestmodel2.md index 1702d82..91d4fa8 100755 --- 
a/docs/models/shared/createcompletionrequestmodel2.md +++ b/docs/models/shared/createcompletionrequestmodel2.md @@ -6,14 +6,15 @@ ID of the model to use. You can use the [List models](/docs/api-reference/models ## Values -| Name | Value | -| ---------------- | ---------------- | -| `Babbage002` | babbage-002 | -| `Davinci002` | davinci-002 | -| `TextDavinci003` | text-davinci-003 | -| `TextDavinci002` | text-davinci-002 | -| `TextDavinci001` | text-davinci-001 | -| `CodeDavinci002` | code-davinci-002 | -| `TextCurie001` | text-curie-001 | -| `TextBabbage001` | text-babbage-001 | -| `TextAda001` | text-ada-001 | \ No newline at end of file +| Name | Value | +| ---------------------- | ---------------------- | +| `Babbage002` | babbage-002 | +| `Davinci002` | davinci-002 | +| `Gpt35TurboInstruct` | gpt-3.5-turbo-instruct | +| `TextDavinci003` | text-davinci-003 | +| `TextDavinci002` | text-davinci-002 | +| `TextDavinci001` | text-davinci-001 | +| `CodeDavinci002` | code-davinci-002 | +| `TextCurie001` | text-curie-001 | +| `TextBabbage001` | text-babbage-001 | +| `TextAda001` | text-ada-001 | \ No newline at end of file diff --git a/docs/models/shared/finetuningjob.md b/docs/models/shared/finetuningjob.md index d0e29e4..c1b8500 100755 --- a/docs/models/shared/finetuningjob.md +++ b/docs/models/shared/finetuningjob.md @@ -11,14 +11,14 @@ The `fine_tuning.job` object represents a fine-tuning job that has been created | `createdAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was created. | | `error` | [FineTuningJobError](../../models/shared/finetuningjoberror.md) | :heavy_check_mark: | For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. | | `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. 
| -| `finishedAt` | *number* | :heavy_minus_sign: | The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. | +| `finishedAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. | | `hyperparameters` | [FineTuningJobHyperparameters](../../models/shared/finetuningjobhyperparameters.md) | :heavy_check_mark: | The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. | | `id` | *string* | :heavy_check_mark: | The object identifier, which can be referenced in the API endpoints. | | `model` | *string* | :heavy_check_mark: | The base model that is being fine-tuned. | | `object` | *string* | :heavy_check_mark: | The object type, which is always "fine_tuning.job". | | `organizationId` | *string* | :heavy_check_mark: | The organization that owns the fine-tuning job. | | `resultFiles` | *string*[] | :heavy_check_mark: | The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). | -| `status` | *string* | :heavy_check_mark: | The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. | +| `status` | *string* | :heavy_check_mark: | The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. | | `trainedTokens` | *number* | :heavy_check_mark: | The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. | | `trainingFile` | *string* | :heavy_check_mark: | The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). 
| | `validationFile` | *string* | :heavy_check_mark: | The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). | \ No newline at end of file diff --git a/docs/models/shared/finetuningjoberror.md b/docs/models/shared/finetuningjoberror.md index ca2efa3..66ebcda 100755 --- a/docs/models/shared/finetuningjoberror.md +++ b/docs/models/shared/finetuningjoberror.md @@ -7,6 +7,6 @@ For fine-tuning jobs that have `failed`, this will contain more information on t | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | -| `code` | *string* | :heavy_minus_sign: | A machine-readable error code. | -| `message` | *string* | :heavy_minus_sign: | A human-readable error message. | -| `param` | *string* | :heavy_minus_sign: | The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. | \ No newline at end of file +| `code` | *string* | :heavy_check_mark: | A machine-readable error code. | +| `message` | *string* | :heavy_check_mark: | A human-readable error message. | +| `param` | *string* | :heavy_check_mark: | The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. 
| \ No newline at end of file diff --git a/docs/models/shared/finetuningjobhyperparameters.md b/docs/models/shared/finetuningjobhyperparameters.md index a81da83..1c43063 100755 --- a/docs/models/shared/finetuningjobhyperparameters.md +++ b/docs/models/shared/finetuningjobhyperparameters.md @@ -7,4 +7,4 @@ The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/d | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `nEpochs` | *any* | :heavy_minus_sign: | The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
"Auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. | \ No newline at end of file +| `nEpochs` | *any* | :heavy_check_mark: | The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
"auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. | \ No newline at end of file diff --git a/docs/models/shared/finetuningjobhyperparametersnepochs1.md b/docs/models/shared/finetuningjobhyperparametersnepochs1.md index 3525a69..8774e43 100755 --- a/docs/models/shared/finetuningjobhyperparametersnepochs1.md +++ b/docs/models/shared/finetuningjobhyperparametersnepochs1.md @@ -1,7 +1,7 @@ # FineTuningJobHyperparametersNEpochs1 The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. -"Auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. +"auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. ## Values diff --git a/files.gen b/files.gen index afd2a6b..0f8b21b 100755 --- a/files.gen +++ b/files.gen @@ -93,8 +93,6 @@ src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts src/sdk/models/shared/security.ts src/sdk/models/shared/index.ts src/sdk/models/errors/index.ts -docs/sdks/gpt/README.md -docs/sdks/openai/README.md USAGE.md docs/models/operations/cancelfinetunerequest.md docs/models/operations/cancelfinetuneresponse.md @@ -224,4 +222,6 @@ docs/models/shared/listmodelsresponse.md docs/models/shared/model.md docs/models/shared/listpaginatedfinetuningjobsresponse.md docs/models/shared/security.md +docs/sdks/gpt/README.md +docs/sdks/openai/README.md .gitattributes \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index aee87d0..042e91e 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: 6b6973cd0a05cc2c2d531357a1c0c034 + docChecksum: ad804f44b6fe212c8ed113a0291ffdaf docVersion: 2.0.0 - speakeasyVersion: 1.88.1 - generationVersion: 2.122.1 + 
speakeasyVersion: 1.89.0 + generationVersion: 2.125.1 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.87.4 + core: 2.87.5 deprecations: 2.81.1 globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.22.7 + version: 2.22.8 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 550e037..fa03342 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.7", + "version": "2.22.8", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.22.7", + "version": "2.22.8", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 103fcfa..8958512 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.7", + "version": "2.22.8", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts index 8fc4c57..2ca9fa2 100755 --- a/src/sdk/models/shared/createcompletionrequest.ts +++ b/src/sdk/models/shared/createcompletionrequest.ts @@ -14,6 +14,7 @@ import { Expose } from "class-transformer"; export enum CreateCompletionRequestModel2 { Babbage002 = "babbage-002", Davinci002 = "davinci-002", + Gpt35TurboInstruct = "gpt-3.5-turbo-instruct", TextDavinci003 = "text-davinci-003", TextDavinci002 = "text-davinci-002", TextDavinci001 = "text-davinci-001", diff --git a/src/sdk/models/shared/finetuningjob.ts b/src/sdk/models/shared/finetuningjob.ts index 1339f8f..4c15cb0 100755 --- a/src/sdk/models/shared/finetuningjob.ts +++ b/src/sdk/models/shared/finetuningjob.ts @@ -14,28 +14,28 @@ export class FineTuningJobError extends SpeakeasyBase { */ 
@SpeakeasyMetadata() @Expose({ name: "code" }) - code?: string; + code: string; /** * A human-readable error message. */ @SpeakeasyMetadata() @Expose({ name: "message" }) - message?: string; + message: string; /** * The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. */ @SpeakeasyMetadata() @Expose({ name: "param" }) - param?: string; + param: string; } /** * The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. * * @remarks - * "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + * "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. */ export enum FineTuningJobHyperparametersNEpochs1 { Auto = "auto", @@ -49,11 +49,11 @@ export class FineTuningJobHyperparameters extends SpeakeasyBase { * The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. * * @remarks - * "Auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + * "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. */ @SpeakeasyMetadata() @Expose({ name: "n_epochs" }) - nEpochs?: any; + nEpochs: any; } /** @@ -90,7 +90,7 @@ export class FineTuningJob extends SpeakeasyBase { */ @SpeakeasyMetadata() @Expose({ name: "finished_at" }) - finishedAt?: number; + finishedAt: number; /** * The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
@@ -136,7 +136,7 @@ export class FineTuningJob extends SpeakeasyBase { resultFiles: string[]; /** - * The current status of the fine-tuning job, which can be either `created`, `pending`, `running`, `succeeded`, `failed`, or `cancelled`. + * The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. */ @SpeakeasyMetadata() @Expose({ name: "status" }) diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 725d88c..a24c05d 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,8 +48,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.22.7"; - genVersion = "2.122.1"; + sdkVersion = "2.22.8"; + genVersion = "2.125.1"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From e44c6734a6842c17bc87948e9220e4a77890157b Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 26 Sep 2023 00:55:39 +0000 Subject: [PATCH 55/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.91.0 --- README.md | 4 ---- RELEASES.md | 12 +++++++++++- docs/models/operations/cancelfinetuneresponse.md | 6 +++--- .../models/operations/cancelfinetuningjobresponse.md | 6 +++--- .../operations/createchatcompletionresponse.md | 6 +++--- docs/models/operations/createcompletionresponse.md | 6 +++--- docs/models/operations/createeditresponse.md | 6 +++--- docs/models/operations/createembeddingresponse.md | 6 +++--- docs/models/operations/createfileresponse.md | 6 +++--- docs/models/operations/createfinetuneresponse.md | 6 +++--- .../models/operations/createfinetuningjobresponse.md | 6 +++--- docs/models/operations/createimageeditresponse.md | 6 +++--- docs/models/operations/createimageresponse.md | 6 +++--- .../operations/createimagevariationresponse.md | 6 +++--- docs/models/operations/createmoderationresponse.md | 6 +++--- .../models/operations/createtranscriptionresponse.md | 6 +++--- 
docs/models/operations/createtranslationresponse.md | 6 +++--- docs/models/operations/deletefileresponse.md | 6 +++--- docs/models/operations/deletemodelresponse.md | 6 +++--- docs/models/operations/downloadfileresponse.md | 6 +++--- docs/models/operations/listfilesresponse.md | 6 +++--- docs/models/operations/listfinetuneeventsresponse.md | 6 +++--- docs/models/operations/listfinetunesresponse.md | 6 +++--- .../operations/listfinetuningeventsresponse.md | 6 +++--- docs/models/operations/listmodelsresponse.md | 6 +++--- .../listpaginatedfinetuningjobsresponse.md | 6 +++--- docs/models/operations/retrievefileresponse.md | 6 +++--- docs/models/operations/retrievefinetuneresponse.md | 6 +++--- .../operations/retrievefinetuningjobresponse.md | 6 +++--- docs/models/operations/retrievemodelresponse.md | 6 +++--- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/models/operations/cancelfinetune.ts | 9 +++++++++ src/sdk/models/operations/cancelfinetuningjob.ts | 9 +++++++++ src/sdk/models/operations/createchatcompletion.ts | 9 +++++++++ src/sdk/models/operations/createcompletion.ts | 9 +++++++++ src/sdk/models/operations/createedit.ts | 9 +++++++++ src/sdk/models/operations/createembedding.ts | 9 +++++++++ src/sdk/models/operations/createfile.ts | 9 +++++++++ src/sdk/models/operations/createfinetune.ts | 9 +++++++++ src/sdk/models/operations/createfinetuningjob.ts | 9 +++++++++ src/sdk/models/operations/createimage.ts | 9 +++++++++ src/sdk/models/operations/createimageedit.ts | 9 +++++++++ src/sdk/models/operations/createimagevariation.ts | 9 +++++++++ src/sdk/models/operations/createmoderation.ts | 9 +++++++++ src/sdk/models/operations/createtranscription.ts | 9 +++++++++ src/sdk/models/operations/createtranslation.ts | 9 +++++++++ src/sdk/models/operations/deletefile.ts | 9 +++++++++ src/sdk/models/operations/deletemodel.ts | 9 +++++++++ src/sdk/models/operations/downloadfile.ts | 9 +++++++++ src/sdk/models/operations/listfiles.ts | 9 +++++++++ 
src/sdk/models/operations/listfinetuneevents.ts | 9 +++++++++ src/sdk/models/operations/listfinetunes.ts | 9 +++++++++ src/sdk/models/operations/listfinetuningevents.ts | 9 +++++++++ src/sdk/models/operations/listmodels.ts | 9 +++++++++ .../models/operations/listpaginatedfinetuningjobs.ts | 9 +++++++++ src/sdk/models/operations/retrievefile.ts | 9 +++++++++ src/sdk/models/operations/retrievefinetune.ts | 9 +++++++++ src/sdk/models/operations/retrievefinetuningjob.ts | 9 +++++++++ src/sdk/models/operations/retrievemodel.ts | 9 +++++++++ src/sdk/sdk.ts | 4 ++-- 62 files changed, 356 insertions(+), 98 deletions(-) diff --git a/README.md b/README.md index 0068d0f..db75c76 100755 --- a/README.md +++ b/README.md @@ -122,8 +122,6 @@ Response includes details of the enqueued job including job status and the name - - @@ -136,8 +134,6 @@ returned response object will have a `next` method that can be called to pull do return value of `next` is `null`, then there are no more pages to be fetched. Here's an example of one such pagination call: - - diff --git a/RELEASES.md b/RELEASES.md index 18495be..1bf1652 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -620,4 +620,14 @@ Based on: ### Generated - [typescript v2.22.8] . ### Releases -- [NPM v2.22.8] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.8 - . \ No newline at end of file +- [NPM v2.22.8] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.22.8 - . + +## 2023-09-26 00:55:14 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.91.0 (2.129.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.23.0] . +### Releases +- [NPM v2.23.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.23.0 - . 
\ No newline at end of file diff --git a/docs/models/operations/cancelfinetuneresponse.md b/docs/models/operations/cancelfinetuneresponse.md index 58e5848..f2e3d2f 100755 --- a/docs/models/operations/cancelfinetuneresponse.md +++ b/docs/models/operations/cancelfinetuneresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/cancelfinetuningjobresponse.md b/docs/models/operations/cancelfinetuningjobresponse.md index 0db8fae..55c3ae0 100755 --- a/docs/models/operations/cancelfinetuningjobresponse.md +++ b/docs/models/operations/cancelfinetuningjobresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response 
content type for this operation | | `fineTuningJob` | [shared.FineTuningJob](../../models/shared/finetuningjob.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createchatcompletionresponse.md b/docs/models/operations/createchatcompletionresponse.md index fc1b1f5..3dd524c 100755 --- a/docs/models/operations/createchatcompletionresponse.md +++ b/docs/models/operations/createchatcompletionresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `createChatCompletionResponse` | [shared.CreateChatCompletionResponse](../../models/shared/createchatcompletionresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | 
:heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createcompletionresponse.md b/docs/models/operations/createcompletionresponse.md index 299c999..215452a 100755 --- a/docs/models/operations/createcompletionresponse.md +++ b/docs/models/operations/createcompletionresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `createCompletionResponse` | [shared.CreateCompletionResponse](../../models/shared/createcompletionresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createeditresponse.md b/docs/models/operations/createeditresponse.md index daf8445..36e2ac5 100755 --- a/docs/models/operations/createeditresponse.md +++ b/docs/models/operations/createeditresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | 
---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `createEditResponse` | [shared.CreateEditResponse](../../models/shared/createeditresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createembeddingresponse.md b/docs/models/operations/createembeddingresponse.md index e89e528..f1cde31 100755 --- a/docs/models/operations/createembeddingresponse.md +++ b/docs/models/operations/createembeddingresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `createEmbeddingResponse` | [shared.CreateEmbeddingResponse](../../models/shared/createembeddingresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | 
\ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createfileresponse.md b/docs/models/operations/createfileresponse.md index 2d5ba03..015f9fb 100755 --- a/docs/models/operations/createfileresponse.md +++ b/docs/models/operations/createfileresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `openAIFile` | [shared.OpenAIFile](../../models/shared/openaifile.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createfinetuneresponse.md b/docs/models/operations/createfinetuneresponse.md index d6568d5..cf1f98b 100755 --- a/docs/models/operations/createfinetuneresponse.md +++ b/docs/models/operations/createfinetuneresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------- | ------------------------------------------------------- | 
------------------------------------------------------- | ------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createfinetuningjobresponse.md b/docs/models/operations/createfinetuningjobresponse.md index 2d0eabe..f4d2ecc 100755 --- a/docs/models/operations/createfinetuningjobresponse.md +++ b/docs/models/operations/createfinetuningjobresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `fineTuningJob` | [shared.FineTuningJob](../../models/shared/finetuningjob.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | 
[AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createimageeditresponse.md b/docs/models/operations/createimageeditresponse.md index 222a066..c2db5fe 100755 --- a/docs/models/operations/createimageeditresponse.md +++ b/docs/models/operations/createimageeditresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `imagesResponse` | [shared.ImagesResponse](../../models/shared/imagesresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createimageresponse.md b/docs/models/operations/createimageresponse.md index 2c109e9..8c45c4c 100755 --- a/docs/models/operations/createimageresponse.md +++ b/docs/models/operations/createimageresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | 
-------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `imagesResponse` | [shared.ImagesResponse](../../models/shared/imagesresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createimagevariationresponse.md b/docs/models/operations/createimagevariationresponse.md index 0307361..43a2c37 100755 --- a/docs/models/operations/createimagevariationresponse.md +++ b/docs/models/operations/createimagevariationresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `imagesResponse` | [shared.ImagesResponse](../../models/shared/imagesresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | 
[AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createmoderationresponse.md b/docs/models/operations/createmoderationresponse.md index 5a6beaa..a010b55 100755 --- a/docs/models/operations/createmoderationresponse.md +++ b/docs/models/operations/createmoderationresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `createModerationResponse` | [shared.CreateModerationResponse](../../models/shared/createmoderationresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createtranscriptionresponse.md b/docs/models/operations/createtranscriptionresponse.md index 52f06ec..4479578 100755 --- a/docs/models/operations/createtranscriptionresponse.md +++ b/docs/models/operations/createtranscriptionresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | 
---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `createTranscriptionResponse` | [shared.CreateTranscriptionResponse](../../models/shared/createtranscriptionresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/createtranslationresponse.md b/docs/models/operations/createtranslationresponse.md index 5d836aa..2e4ef40 100755 --- a/docs/models/operations/createtranslationresponse.md +++ b/docs/models/operations/createtranslationresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `createTranslationResponse` 
| [shared.CreateTranslationResponse](../../models/shared/createtranslationresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/deletefileresponse.md b/docs/models/operations/deletefileresponse.md index 51e9dfb..6946657 100755 --- a/docs/models/operations/deletefileresponse.md +++ b/docs/models/operations/deletefileresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `deleteFileResponse` | [shared.DeleteFileResponse](../../models/shared/deletefileresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/deletemodelresponse.md 
b/docs/models/operations/deletemodelresponse.md index 89fc25d..aba1974 100755 --- a/docs/models/operations/deletemodelresponse.md +++ b/docs/models/operations/deletemodelresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `deleteModelResponse` | [shared.DeleteModelResponse](../../models/shared/deletemodelresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/downloadfileresponse.md b/docs/models/operations/downloadfileresponse.md index 1daab0b..2fb14f2 100755 --- a/docs/models/operations/downloadfileresponse.md +++ b/docs/models/operations/downloadfileresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | 
[AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | | `downloadFile200ApplicationJSONString` | *string* | :heavy_minus_sign: | OK | \ No newline at end of file diff --git a/docs/models/operations/listfilesresponse.md b/docs/models/operations/listfilesresponse.md index 09bca30..43b8e4a 100755 --- a/docs/models/operations/listfilesresponse.md +++ b/docs/models/operations/listfilesresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `listFilesResponse` | [shared.ListFilesResponse](../../models/shared/listfilesresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/listfinetuneeventsresponse.md b/docs/models/operations/listfinetuneeventsresponse.md index 
74fe71c..68eeeb6 100755 --- a/docs/models/operations/listfinetuneeventsresponse.md +++ b/docs/models/operations/listfinetuneeventsresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `listFineTuneEventsResponse` | [shared.ListFineTuneEventsResponse](../../models/shared/listfinetuneeventsresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/listfinetunesresponse.md b/docs/models/operations/listfinetunesresponse.md index 03aea90..1cb6d82 100755 --- a/docs/models/operations/listfinetunesresponse.md +++ b/docs/models/operations/listfinetunesresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `contentType` | *string* | 
:heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `listFineTunesResponse` | [shared.ListFineTunesResponse](../../models/shared/listfinetunesresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/listfinetuningeventsresponse.md b/docs/models/operations/listfinetuningeventsresponse.md index 3cd168d..230ed32 100755 --- a/docs/models/operations/listfinetuningeventsresponse.md +++ b/docs/models/operations/listfinetuningeventsresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `listFineTuningJobEventsResponse` | [shared.ListFineTuningJobEventsResponse](../../models/shared/listfinetuningjobeventsresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | 
:heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/listmodelsresponse.md b/docs/models/operations/listmodelsresponse.md index 9db7c75..9157946 100755 --- a/docs/models/operations/listmodelsresponse.md +++ b/docs/models/operations/listmodelsresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `listModelsResponse` | [shared.ListModelsResponse](../../models/shared/listmodelsresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/listpaginatedfinetuningjobsresponse.md b/docs/models/operations/listpaginatedfinetuningjobsresponse.md index 9a0cf8c..bac1eb4 100755 --- a/docs/models/operations/listpaginatedfinetuningjobsresponse.md +++ b/docs/models/operations/listpaginatedfinetuningjobsresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | 
-------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `listPaginatedFineTuningJobsResponse` | [shared.ListPaginatedFineTuningJobsResponse](../../models/shared/listpaginatedfinetuningjobsresponse.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/retrievefileresponse.md b/docs/models/operations/retrievefileresponse.md index 615027c..c596cbd 100755 --- a/docs/models/operations/retrievefileresponse.md +++ b/docs/models/operations/retrievefileresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `openAIFile` | [shared.OpenAIFile](../../models/shared/openaifile.md) | 
:heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/retrievefinetuneresponse.md b/docs/models/operations/retrievefinetuneresponse.md index c6ecb79..682cf60 100755 --- a/docs/models/operations/retrievefinetuneresponse.md +++ b/docs/models/operations/retrievefinetuneresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `fineTune` | [shared.FineTune](../../models/shared/finetune.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/retrievefinetuningjobresponse.md b/docs/models/operations/retrievefinetuningjobresponse.md index ec10122..a80f1da 100755 --- a/docs/models/operations/retrievefinetuningjobresponse.md +++ 
b/docs/models/operations/retrievefinetuningjobresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `fineTuningJob` | [shared.FineTuningJob](../../models/shared/finetuningjob.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/docs/models/operations/retrievemodelresponse.md b/docs/models/operations/retrievemodelresponse.md index 1bb3516..974b317 100755 --- a/docs/models/operations/retrievemodelresponse.md +++ b/docs/models/operations/retrievemodelresponse.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | -| `contentType` | *string* | :heavy_check_mark: | N/A | +| `contentType` | *string* | :heavy_check_mark: | HTTP response content type for this operation | | `model` | [shared.Model](../../models/shared/model.md) | :heavy_minus_sign: | OK | -| `statusCode` | *number* | :heavy_check_mark: | N/A | -| `rawResponse` | 
[AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `statusCode` | *number* | :heavy_check_mark: | HTTP response status code for this operation | +| `rawResponse` | [AxiosResponse](https://axios-http.com/docs/res_schema) | :heavy_minus_sign: | Raw HTTP response; suitable for custom response parsing | \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 042e91e..571d98c 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: ad804f44b6fe212c8ed113a0291ffdaf docVersion: 2.0.0 - speakeasyVersion: 1.89.0 - generationVersion: 2.125.1 + speakeasyVersion: 1.91.0 + generationVersion: 2.129.1 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.87.5 + core: 2.88.0 deprecations: 2.81.1 globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.22.8 + version: 2.23.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index fa03342..fba9c6c 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.8", + "version": "2.23.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.22.8", + "version": "2.23.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 8958512..9129a43 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.22.8", + "version": "2.23.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/operations/cancelfinetune.ts b/src/sdk/models/operations/cancelfinetune.ts index 6797488..91989fe 100755 --- a/src/sdk/models/operations/cancelfinetune.ts +++ 
b/src/sdk/models/operations/cancelfinetune.ts @@ -18,6 +18,9 @@ export class CancelFineTuneRequest extends SpeakeasyBase { } export class CancelFineTuneResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -27,9 +30,15 @@ export class CancelFineTuneResponse extends SpeakeasyBase { @SpeakeasyMetadata() fineTune?: shared.FineTune; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/cancelfinetuningjob.ts b/src/sdk/models/operations/cancelfinetuningjob.ts index 06601a7..a6038d3 100755 --- a/src/sdk/models/operations/cancelfinetuningjob.ts +++ b/src/sdk/models/operations/cancelfinetuningjob.ts @@ -18,6 +18,9 @@ export class CancelFineTuningJobRequest extends SpeakeasyBase { } export class CancelFineTuningJobResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -27,9 +30,15 @@ export class CancelFineTuningJobResponse extends SpeakeasyBase { @SpeakeasyMetadata() fineTuningJob?: shared.FineTuningJob; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createchatcompletion.ts b/src/sdk/models/operations/createchatcompletion.ts index 8b78169..0764acd 100755 --- a/src/sdk/models/operations/createchatcompletion.ts +++ b/src/sdk/models/operations/createchatcompletion.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateChatCompletionResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ 
@SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateChatCompletionResponse extends SpeakeasyBase { @SpeakeasyMetadata() createChatCompletionResponse?: shared.CreateChatCompletionResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createcompletion.ts b/src/sdk/models/operations/createcompletion.ts index be937c7..3da3478 100755 --- a/src/sdk/models/operations/createcompletion.ts +++ b/src/sdk/models/operations/createcompletion.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateCompletionResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateCompletionResponse extends SpeakeasyBase { @SpeakeasyMetadata() createCompletionResponse?: shared.CreateCompletionResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createedit.ts b/src/sdk/models/operations/createedit.ts index 372021d..53d7726 100755 --- a/src/sdk/models/operations/createedit.ts +++ b/src/sdk/models/operations/createedit.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateEditResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateEditResponse extends SpeakeasyBase { @SpeakeasyMetadata() createEditResponse?: shared.CreateEditResponse; + /** + * HTTP response status code for this operation + */ 
@SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createembedding.ts b/src/sdk/models/operations/createembedding.ts index f8ce762..aa01dc3 100755 --- a/src/sdk/models/operations/createembedding.ts +++ b/src/sdk/models/operations/createembedding.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateEmbeddingResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateEmbeddingResponse extends SpeakeasyBase { @SpeakeasyMetadata() createEmbeddingResponse?: shared.CreateEmbeddingResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createfile.ts b/src/sdk/models/operations/createfile.ts index 254af49..f236821 100755 --- a/src/sdk/models/operations/createfile.ts +++ b/src/sdk/models/operations/createfile.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateFileResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateFileResponse extends SpeakeasyBase { @SpeakeasyMetadata() openAIFile?: shared.OpenAIFile; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createfinetune.ts b/src/sdk/models/operations/createfinetune.ts index 757664e..66d89ee 100755 --- 
a/src/sdk/models/operations/createfinetune.ts +++ b/src/sdk/models/operations/createfinetune.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateFineTuneResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateFineTuneResponse extends SpeakeasyBase { @SpeakeasyMetadata() fineTune?: shared.FineTune; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createfinetuningjob.ts b/src/sdk/models/operations/createfinetuningjob.ts index c2c86ca..67aa487 100755 --- a/src/sdk/models/operations/createfinetuningjob.ts +++ b/src/sdk/models/operations/createfinetuningjob.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateFineTuningJobResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateFineTuningJobResponse extends SpeakeasyBase { @SpeakeasyMetadata() fineTuningJob?: shared.FineTuningJob; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createimage.ts b/src/sdk/models/operations/createimage.ts index 4e973b4..133e34d 100755 --- a/src/sdk/models/operations/createimage.ts +++ b/src/sdk/models/operations/createimage.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateImageResponse extends SpeakeasyBase { + /** + * HTTP response content type for this 
operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateImageResponse extends SpeakeasyBase { @SpeakeasyMetadata() imagesResponse?: shared.ImagesResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createimageedit.ts b/src/sdk/models/operations/createimageedit.ts index e0ad72b..9b38533 100755 --- a/src/sdk/models/operations/createimageedit.ts +++ b/src/sdk/models/operations/createimageedit.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateImageEditResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateImageEditResponse extends SpeakeasyBase { @SpeakeasyMetadata() imagesResponse?: shared.ImagesResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createimagevariation.ts b/src/sdk/models/operations/createimagevariation.ts index c16f193..e8ae138 100755 --- a/src/sdk/models/operations/createimagevariation.ts +++ b/src/sdk/models/operations/createimagevariation.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateImageVariationResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateImageVariationResponse extends SpeakeasyBase { @SpeakeasyMetadata() imagesResponse?: shared.ImagesResponse; + /** + * HTTP response status code for this operation + */ 
@SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createmoderation.ts b/src/sdk/models/operations/createmoderation.ts index e936350..c7d9136 100755 --- a/src/sdk/models/operations/createmoderation.ts +++ b/src/sdk/models/operations/createmoderation.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateModerationResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateModerationResponse extends SpeakeasyBase { @SpeakeasyMetadata() createModerationResponse?: shared.CreateModerationResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/createtranscription.ts b/src/sdk/models/operations/createtranscription.ts index 4bf7e45..b3421ba 100755 --- a/src/sdk/models/operations/createtranscription.ts +++ b/src/sdk/models/operations/createtranscription.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateTranscriptionResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateTranscriptionResponse extends SpeakeasyBase { @SpeakeasyMetadata() createTranscriptionResponse?: shared.CreateTranscriptionResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git 
a/src/sdk/models/operations/createtranslation.ts b/src/sdk/models/operations/createtranslation.ts index fc8d2e6..da3d190 100755 --- a/src/sdk/models/operations/createtranslation.ts +++ b/src/sdk/models/operations/createtranslation.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class CreateTranslationResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class CreateTranslationResponse extends SpeakeasyBase { @SpeakeasyMetadata() createTranslationResponse?: shared.CreateTranslationResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/deletefile.ts b/src/sdk/models/operations/deletefile.ts index 751df75..bc87348 100755 --- a/src/sdk/models/operations/deletefile.ts +++ b/src/sdk/models/operations/deletefile.ts @@ -15,6 +15,9 @@ export class DeleteFileRequest extends SpeakeasyBase { } export class DeleteFileResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -24,9 +27,15 @@ export class DeleteFileResponse extends SpeakeasyBase { @SpeakeasyMetadata() deleteFileResponse?: shared.DeleteFileResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/deletemodel.ts b/src/sdk/models/operations/deletemodel.ts index 85b6afa..7dacc56 100755 --- a/src/sdk/models/operations/deletemodel.ts +++ b/src/sdk/models/operations/deletemodel.ts @@ -15,6 +15,9 @@ export class DeleteModelRequest extends SpeakeasyBase { } 
export class DeleteModelResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -24,9 +27,15 @@ export class DeleteModelResponse extends SpeakeasyBase { @SpeakeasyMetadata() deleteModelResponse?: shared.DeleteModelResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/downloadfile.ts b/src/sdk/models/operations/downloadfile.ts index dabdf89..0d71888 100755 --- a/src/sdk/models/operations/downloadfile.ts +++ b/src/sdk/models/operations/downloadfile.ts @@ -14,12 +14,21 @@ export class DownloadFileRequest extends SpeakeasyBase { } export class DownloadFileResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; diff --git a/src/sdk/models/operations/listfiles.ts b/src/sdk/models/operations/listfiles.ts index 516974c..7f61d7a 100755 --- a/src/sdk/models/operations/listfiles.ts +++ b/src/sdk/models/operations/listfiles.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class ListFilesResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class ListFilesResponse extends SpeakeasyBase { @SpeakeasyMetadata() listFilesResponse?: shared.ListFilesResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ 
@SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/listfinetuneevents.ts b/src/sdk/models/operations/listfinetuneevents.ts index aab8a6d..6fe08c5 100755 --- a/src/sdk/models/operations/listfinetuneevents.ts +++ b/src/sdk/models/operations/listfinetuneevents.ts @@ -34,6 +34,9 @@ export class ListFineTuneEventsRequest extends SpeakeasyBase { } export class ListFineTuneEventsResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -43,9 +46,15 @@ export class ListFineTuneEventsResponse extends SpeakeasyBase { @SpeakeasyMetadata() listFineTuneEventsResponse?: shared.ListFineTuneEventsResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/listfinetunes.ts b/src/sdk/models/operations/listfinetunes.ts index c16ff58..41ad0f0 100755 --- a/src/sdk/models/operations/listfinetunes.ts +++ b/src/sdk/models/operations/listfinetunes.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class ListFineTunesResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class ListFineTunesResponse extends SpeakeasyBase { @SpeakeasyMetadata() listFineTunesResponse?: shared.ListFineTunesResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/listfinetuningevents.ts b/src/sdk/models/operations/listfinetuningevents.ts index be1812b..981c054 100755 --- 
a/src/sdk/models/operations/listfinetuningevents.ts +++ b/src/sdk/models/operations/listfinetuningevents.ts @@ -30,6 +30,9 @@ export class ListFineTuningEventsRequest extends SpeakeasyBase { } export class ListFineTuningEventsResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -39,9 +42,15 @@ export class ListFineTuningEventsResponse extends SpeakeasyBase { @SpeakeasyMetadata() listFineTuningJobEventsResponse?: shared.ListFineTuningJobEventsResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/listmodels.ts b/src/sdk/models/operations/listmodels.ts index 2475554..8c01992 100755 --- a/src/sdk/models/operations/listmodels.ts +++ b/src/sdk/models/operations/listmodels.ts @@ -7,6 +7,9 @@ import * as shared from "../shared"; import { AxiosResponse } from "axios"; export class ListModelsResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -16,9 +19,15 @@ export class ListModelsResponse extends SpeakeasyBase { @SpeakeasyMetadata() listModelsResponse?: shared.ListModelsResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/listpaginatedfinetuningjobs.ts b/src/sdk/models/operations/listpaginatedfinetuningjobs.ts index 66b9cdc..ee6b1a7 100755 --- a/src/sdk/models/operations/listpaginatedfinetuningjobs.ts +++ b/src/sdk/models/operations/listpaginatedfinetuningjobs.ts @@ -21,6 +21,9 @@ export class ListPaginatedFineTuningJobsRequest extends SpeakeasyBase { } export class 
ListPaginatedFineTuningJobsResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -30,9 +33,15 @@ export class ListPaginatedFineTuningJobsResponse extends SpeakeasyBase { @SpeakeasyMetadata() listPaginatedFineTuningJobsResponse?: shared.ListPaginatedFineTuningJobsResponse; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/retrievefile.ts b/src/sdk/models/operations/retrievefile.ts index be3a1cf..860096a 100755 --- a/src/sdk/models/operations/retrievefile.ts +++ b/src/sdk/models/operations/retrievefile.ts @@ -15,6 +15,9 @@ export class RetrieveFileRequest extends SpeakeasyBase { } export class RetrieveFileResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -24,9 +27,15 @@ export class RetrieveFileResponse extends SpeakeasyBase { @SpeakeasyMetadata() openAIFile?: shared.OpenAIFile; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/retrievefinetune.ts b/src/sdk/models/operations/retrievefinetune.ts index 833b8fc..61064ad 100755 --- a/src/sdk/models/operations/retrievefinetune.ts +++ b/src/sdk/models/operations/retrievefinetune.ts @@ -18,6 +18,9 @@ export class RetrieveFineTuneRequest extends SpeakeasyBase { } export class RetrieveFineTuneResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -27,9 +30,15 @@ export class RetrieveFineTuneResponse extends SpeakeasyBase { @SpeakeasyMetadata() fineTune?: 
shared.FineTune; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/retrievefinetuningjob.ts b/src/sdk/models/operations/retrievefinetuningjob.ts index b32ee91..982a904 100755 --- a/src/sdk/models/operations/retrievefinetuningjob.ts +++ b/src/sdk/models/operations/retrievefinetuningjob.ts @@ -18,6 +18,9 @@ export class RetrieveFineTuningJobRequest extends SpeakeasyBase { } export class RetrieveFineTuningJobResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -27,9 +30,15 @@ export class RetrieveFineTuningJobResponse extends SpeakeasyBase { @SpeakeasyMetadata() fineTuningJob?: shared.FineTuningJob; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/models/operations/retrievemodel.ts b/src/sdk/models/operations/retrievemodel.ts index 33c4cf9..4055231 100755 --- a/src/sdk/models/operations/retrievemodel.ts +++ b/src/sdk/models/operations/retrievemodel.ts @@ -15,6 +15,9 @@ export class RetrieveModelRequest extends SpeakeasyBase { } export class RetrieveModelResponse extends SpeakeasyBase { + /** + * HTTP response content type for this operation + */ @SpeakeasyMetadata() contentType: string; @@ -24,9 +27,15 @@ export class RetrieveModelResponse extends SpeakeasyBase { @SpeakeasyMetadata() model?: shared.Model; + /** + * HTTP response status code for this operation + */ @SpeakeasyMetadata() statusCode: number; + /** + * Raw HTTP response; suitable for custom response parsing + */ @SpeakeasyMetadata() rawResponse?: AxiosResponse; } diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 
a24c05d..ef93e3f 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,8 +48,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.22.8"; - genVersion = "2.125.1"; + sdkVersion = "2.23.0"; + genVersion = "2.129.1"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From 0e55fa924fba5ed6ecf4e8b3ed720b7d3fb22ecb Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 27 Sep 2023 00:55:41 +0000 Subject: [PATCH 56/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.91.2 --- README.md | 2 +- RELEASES.md | 12 +++++++++++- docs/sdks/gpt/README.md | 1 + docs/sdks/openai/README.md | 1 + gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 4 ++-- 8 files changed, 23 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index db75c76..4288195 100755 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ sdk.openAI.cancelFineTune({ ## Available Resources and Operations -### [OpenAI](docs/sdks/openai/README.md) +### [openAI](docs/sdks/openai/README.md) * [~~cancelFineTune~~](docs/sdks/openai/README.md#cancelfinetune) - Immediately cancel a fine-tune job. :warning: **Deprecated** diff --git a/RELEASES.md b/RELEASES.md index 1bf1652..a38799c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -630,4 +630,14 @@ Based on: ### Generated - [typescript v2.23.0] . ### Releases -- [NPM v2.23.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.23.0 - . \ No newline at end of file +- [NPM v2.23.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.23.0 - . + +## 2023-09-27 00:55:18 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.91.2 (2.131.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.23.1] . +### Releases +- [NPM v2.23.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.23.1 - . 
\ No newline at end of file diff --git a/docs/sdks/gpt/README.md b/docs/sdks/gpt/README.md index b26d90d..391e9a9 100755 --- a/docs/sdks/gpt/README.md +++ b/docs/sdks/gpt/README.md @@ -1,5 +1,6 @@ # Gpt SDK + ## Overview OpenAI API: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index bda0dea..e2c2817 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -1,4 +1,5 @@ # OpenAI +(*openAI*) ## Overview diff --git a/gen.yaml b/gen.yaml index 571d98c..61a81e1 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: ad804f44b6fe212c8ed113a0291ffdaf docVersion: 2.0.0 - speakeasyVersion: 1.91.0 - generationVersion: 2.129.1 + speakeasyVersion: 1.91.2 + generationVersion: 2.131.1 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.88.0 + core: 2.88.1 deprecations: 2.81.1 globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.23.0 + version: 2.23.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index fba9c6c..a389d49 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.23.0", + "version": "2.23.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.23.0", + "version": "2.23.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 9129a43..0603b16 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.23.0", + "version": "2.23.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index ef93e3f..a1f3cc0 
100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,8 +48,8 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.23.0"; - genVersion = "2.129.1"; + sdkVersion = "2.23.1"; + genVersion = "2.131.1"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From 7496b9589e45d7c901c992a14e3129f8a7f6dbaf Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 29 Sep 2023 00:55:28 +0000 Subject: [PATCH 57/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.91.3 --- RELEASES.md | 12 +- ...trequest2.md => createimageeditrequest.md} | 2 +- ...est2.md => createimagevariationrequest.md} | 2 +- ...uest1.md => createtranscriptionrequest.md} | 2 +- docs/sdks/openai/README.md | 154 +++++++++--------- files.gen | 12 +- gen.yaml | 10 +- package-lock.json | 4 +- package.json | 2 +- src/internal/utils/requestbody.ts | 2 +- ...trequest2.ts => createimageeditrequest.ts} | 2 +- ...est2.ts => createimagevariationrequest.ts} | 2 +- ...uest1.ts => createtranscriptionrequest.ts} | 2 +- src/sdk/models/shared/index.ts | 6 +- src/sdk/openai.ts | 124 ++++---------- src/sdk/sdk.ts | 5 +- 16 files changed, 150 insertions(+), 193 deletions(-) rename docs/models/shared/{createimageeditrequest2.md => createimageeditrequest.md} (99%) rename docs/models/shared/{createimagevariationrequest2.md => createimagevariationrequest.md} (99%) rename docs/models/shared/{createtranscriptionrequest1.md => createtranscriptionrequest.md} (99%) rename src/sdk/models/shared/{createimageeditrequest2.ts => createimageeditrequest.ts} (97%) rename src/sdk/models/shared/{createimagevariationrequest2.ts => createimagevariationrequest.ts} (96%) rename src/sdk/models/shared/{createtranscriptionrequest1.ts => createtranscriptionrequest.ts} (97%) diff --git a/RELEASES.md b/RELEASES.md index a38799c..94e9597 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -640,4 +640,14 @@ Based on: ### Generated - 
[typescript v2.23.1] . ### Releases -- [NPM v2.23.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.23.1 - . \ No newline at end of file +- [NPM v2.23.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.23.1 - . + +## 2023-09-29 00:55:03 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.91.3 (2.139.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.24.0] . +### Releases +- [NPM v2.24.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.24.0 - . \ No newline at end of file diff --git a/docs/models/shared/createimageeditrequest2.md b/docs/models/shared/createimageeditrequest.md similarity index 99% rename from docs/models/shared/createimageeditrequest2.md rename to docs/models/shared/createimageeditrequest.md index 6c22a54..b84d229 100755 --- a/docs/models/shared/createimageeditrequest2.md +++ b/docs/models/shared/createimageeditrequest.md @@ -1,4 +1,4 @@ -# CreateImageEditRequest2 +# CreateImageEditRequest ## Fields diff --git a/docs/models/shared/createimagevariationrequest2.md b/docs/models/shared/createimagevariationrequest.md similarity index 99% rename from docs/models/shared/createimagevariationrequest2.md rename to docs/models/shared/createimagevariationrequest.md index 49c38ac..04c2fbd 100755 --- a/docs/models/shared/createimagevariationrequest2.md +++ b/docs/models/shared/createimagevariationrequest.md @@ -1,4 +1,4 @@ -# CreateImageVariationRequest2 +# CreateImageVariationRequest ## Fields diff --git a/docs/models/shared/createtranscriptionrequest1.md b/docs/models/shared/createtranscriptionrequest.md similarity index 99% rename from docs/models/shared/createtranscriptionrequest1.md rename to docs/models/shared/createtranscriptionrequest.md index 6d83ce2..8638432 100755 --- a/docs/models/shared/createtranscriptionrequest1.md +++ b/docs/models/shared/createtranscriptionrequest.md @@ -1,4 +1,4 @@ -# 
CreateTranscriptionRequest1 +# CreateTranscriptionRequest ## Fields diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index e2c2817..76f0bee 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -161,39 +161,39 @@ const sdk = new Gpt({ }); sdk.openAI.createChatCompletion({ - frequencyPenalty: 5488.14, + frequencyPenalty: 7707.26, functionCall: { - name: "Ellis Mitchell", + name: "Diesel Money", }, functions: [ { - description: "illum", - name: "Sabrina Oberbrunner", + description: "Progressive radical model", + name: "Account International incidunt", parameters: { - "magnam": "debitis", + "eum": "Meadows", }, }, ], logitBias: { - "ipsa": 963663, + "eos": 206153, }, - maxTokens: 272656, + maxTokens: 29019, messages: [ { - content: "suscipit", + content: "Northeast frictionless Park", functionCall: { - arguments: "molestiae", - name: "Irving Lehner", + arguments: "Future Southeast", + name: "Southeast", }, - name: "Mrs. Sophie Smith MD", - role: ChatCompletionRequestMessageRole.System, + name: "Faso", + role: ChatCompletionRequestMessageRole.User, }, ], model: "gpt-3.5-turbo", n: 1, - presencePenalty: 8326.2, + presencePenalty: 9408.67, stop: [ - "quo", + "tangible", ], stream: false, temperature: 1, @@ -237,22 +237,20 @@ const sdk = new Gpt({ }); sdk.openAI.createCompletion({ - bestOf: 140350, + bestOf: 160667, echo: false, - frequencyPenalty: 8700.13, + frequencyPenalty: 141.61, logitBias: { - "at": 978619, + "velit": 254881, }, - logprobs: 473608, + logprobs: 877910, maxTokens: 16, - model: CreateCompletionRequestModel2.TextBabbage001, + model: CreateCompletionRequestModel2.TextCurie001, n: 1, - presencePenalty: 4614.79, - prompt: [ - 780529, - ], + presencePenalty: 7232.16, + prompt: "This is a test.", stop: [ - "["\n"]", + "[\"\n\"]", ], stream: false, suffix: "test.", @@ -344,7 +342,9 @@ const sdk = new Gpt({ sdk.openAI.createEmbedding({ input: [ - 639921, + [ + 115613, + ], ], model: 
CreateEmbeddingRequestModel2.TextEmbeddingAda002, user: "user-1234", @@ -387,10 +387,10 @@ const sdk = new Gpt({ sdk.openAI.createFile({ file: { - content: "fugit".encode(), - file: "deleniti", + content: "`'$Z`(L/RH" as bytes <<<>>>, + file: "Rap National", }, - purpose: "hic", + purpose: "Female synergistic Maine", }).then((res: CreateFileResponse) => { if (res.statusCode == 200) { // handle response @@ -436,18 +436,18 @@ const sdk = new Gpt({ }); sdk.openAI.createFineTune({ - batchSize: 758616, + batchSize: 763928, classificationBetas: [ - 5218.48, + 3993.02, ], - classificationNClasses: 105907, - classificationPositiveClass: "commodi", + classificationNClasses: 172686, + classificationPositiveClass: "male Buckinghamshire", computeClassificationMetrics: false, - learningRateMultiplier: 4736, - model: "curie", - nEpochs: 186332, - promptLossWeight: 7742.34, - suffix: "cum", + learningRateMultiplier: 4447.26, + model: CreateFineTuneRequestModel2.Curie, + nEpochs: 441380, + promptLossWeight: 37.22, + suffix: "Reggae Gorgeous synthesizing", trainingFile: "file-abc123", validationFile: "file-abc123", }).then((res: CreateFineTuneResponse) => { @@ -500,7 +500,7 @@ sdk.openAI.createFineTuningJob({ nEpochs: CreateFineTuningJobRequestHyperparametersNEpochs1.Auto, }, model: "gpt-3.5-turbo", - suffix: "excepturi", + suffix: "Thallium", trainingFile: "file-abc123", validationFile: "file-abc123", }).then((res: CreateFineTuningJobResponse) => { @@ -585,12 +585,12 @@ const sdk = new Gpt({ sdk.openAI.createImageEdit({ image: { - content: "aspernatur".encode(), - image: "perferendis", + content: "0]/(|3W_T9" as bytes <<<>>>, + image: "https://loremflickr.com/640/480", }, mask: { - content: "ad".encode(), - mask: "natus", + content: "`^YjrpxopK" as bytes <<<>>>, + mask: "Rap Dodge Incredible", }, n: 1, prompt: "A cute baby sea otter wearing a beret", @@ -606,10 +606,10 @@ sdk.openAI.createImageEdit({ ### Parameters -| Parameter | Type | Required | Description | -| 
-------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `request` | [shared.CreateImageEditRequest2](../../models/shared/createimageeditrequest2.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `request` | [shared.CreateImageEditRequest](../../models/shared/createimageeditrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| ### Response @@ -636,8 +636,8 @@ const sdk = new Gpt({ sdk.openAI.createImageVariation({ image: { - content: "sed".encode(), - image: "iste", + content: "`YY7PCrWuK" as bytes <<<>>>, + image: "https://loremflickr.com/640/480", }, n: 1, responseFormat: CreateImageVariationRequestResponseFormat.Url, @@ -652,10 +652,10 @@ sdk.openAI.createImageVariation({ ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `request` | [shared.CreateImageVariationRequest2](../../models/shared/createimagevariationrequest2.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `request` | [shared.CreateImageVariationRequest](../../models/shared/createimagevariationrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| ### Response @@ -681,7 +681,9 @@ const sdk = new Gpt({ }); sdk.openAI.createModeration({ - input: "I want to kill them.", + input: [ + "I want to kill them.", + ], model: CreateModerationRequestModel2.TextModerationStable, }).then((res: CreateModerationResponse) => { if (res.statusCode == 200) { @@ -722,14 +724,14 @@ const sdk = new Gpt({ sdk.openAI.createTranscription({ file: { - content: "laboriosam".encode(), - file: "hic", + content: "\#BbTW'zX9" as bytes <<<>>>, + file: "Buckinghamshire", }, - language: "saepe", - model: CreateTranscriptionRequestModel2.Whisper1, - prompt: "in", - responseFormat: CreateTranscriptionRequestResponseFormat.Text, - temperature: 6130.64, + language: "teal Titanium", + model: "whisper-1", + prompt: "Mendelevium Kansas behind", + responseFormat: CreateTranscriptionRequestResponseFormat.Json, + temperature: 3694.44, }).then((res: CreateTranscriptionResponse) => { if (res.statusCode == 200) { // handle response @@ -739,10 +741,10 @@ sdk.openAI.createTranscription({ ### Parameters -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `request` | [shared.CreateTranscriptionRequest1](../../models/shared/createtranscriptionrequest1.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `request` | [shared.CreateTranscriptionRequest](../../models/shared/createtranscriptionrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | ### Response @@ -769,13 +771,13 @@ const sdk = new Gpt({ sdk.openAI.createTranslation({ file: { - content: "iure".encode(), - file: "saepe", + content: "M57UL;W3rx" as bytes <<<>>>, + file: "Reggae Toys silver", }, model: CreateTranslationRequestModel2.Whisper1, - prompt: "architecto", - responseFormat: "ipsa", - temperature: 9698.1, + prompt: "Soft East Frozen", + responseFormat: "Analyst aboard relocate", + temperature: 6003.73, }).then((res: CreateTranslationResponse) => { if (res.statusCode == 200) { // handle response @@ -813,7 +815,7 @@ const sdk = new Gpt({ }); sdk.openAI.deleteFile({ - fileId: "est", + fileId: "yellow kiddingly white", }).then((res: DeleteFileResponse) => { if (res.statusCode == 200) { // handle response @@ -889,7 +891,7 @@ const sdk = new Gpt({ }); sdk.openAI.downloadFile({ - fileId: "mollitia", + fileId: "Maserati Bronze Audi", }).then((res: DownloadFileResponse) => { if (res.statusCode == 200) { // handle response @@ -1043,9 +1045,9 @@ const sdk = new Gpt({ }); sdk.openAI.listFineTuningEvents({ - after: "laborum", + after: "phew silver Consultant", fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - limit: 170909, + limit: 104325, }).then((res: ListFineTuningEventsResponse) => { if (res.statusCode == 200) { // handle response @@ 
-1119,8 +1121,8 @@ const sdk = new Gpt({ }); sdk.openAI.listPaginatedFineTuningJobs({ - after: "dolorem", - limit: 358152, + after: "GB voluptate", + limit: 374490, }).then((res: ListPaginatedFineTuningJobsResponse) => { if (res.statusCode == 200) { // handle response @@ -1158,7 +1160,7 @@ const sdk = new Gpt({ }); sdk.openAI.retrieveFile({ - fileId: "explicabo", + fileId: "online Facilitator enfold", }).then((res: RetrieveFileResponse) => { if (res.statusCode == 200) { // handle response diff --git a/files.gen b/files.gen index 0f8b21b..fe58da2 100755 --- a/files.gen +++ b/files.gen @@ -72,12 +72,12 @@ src/sdk/models/shared/createfinetuningjobrequest.ts src/sdk/models/shared/imagesresponse.ts src/sdk/models/shared/image.ts src/sdk/models/shared/createimagerequest.ts -src/sdk/models/shared/createimageeditrequest2.ts -src/sdk/models/shared/createimagevariationrequest2.ts +src/sdk/models/shared/createimageeditrequest.ts +src/sdk/models/shared/createimagevariationrequest.ts src/sdk/models/shared/createmoderationresponse.ts src/sdk/models/shared/createmoderationrequest.ts src/sdk/models/shared/createtranscriptionresponse.ts -src/sdk/models/shared/createtranscriptionrequest1.ts +src/sdk/models/shared/createtranscriptionrequest.ts src/sdk/models/shared/createtranslationresponse.ts src/sdk/models/shared/createtranslationrequest.ts src/sdk/models/shared/deletefileresponse.ts @@ -190,11 +190,11 @@ docs/models/shared/createimageeditrequestimage.md docs/models/shared/createimageeditrequestmask.md docs/models/shared/createimageeditrequestresponseformat.md docs/models/shared/createimageeditrequestsize.md -docs/models/shared/createimageeditrequest2.md +docs/models/shared/createimageeditrequest.md docs/models/shared/createimagevariationrequestimage.md docs/models/shared/createimagevariationrequestresponseformat.md docs/models/shared/createimagevariationrequestsize.md -docs/models/shared/createimagevariationrequest2.md +docs/models/shared/createimagevariationrequest.md 
docs/models/shared/createmoderationresponseresultscategories.md docs/models/shared/createmoderationresponseresultscategoryscores.md docs/models/shared/createmoderationresponseresults.md @@ -205,7 +205,7 @@ docs/models/shared/createtranscriptionresponse.md docs/models/shared/createtranscriptionrequestfile.md docs/models/shared/createtranscriptionrequestmodel2.md docs/models/shared/createtranscriptionrequestresponseformat.md -docs/models/shared/createtranscriptionrequest1.md +docs/models/shared/createtranscriptionrequest.md docs/models/shared/createtranslationresponse.md docs/models/shared/createtranslationrequestfile.md docs/models/shared/createtranslationrequestmodel2.md diff --git a/gen.yaml b/gen.yaml index 61a81e1..d4c1ebd 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: ad804f44b6fe212c8ed113a0291ffdaf + docChecksum: b1068f88203a26e2dc0afaa17203e882 docVersion: 2.0.0 - speakeasyVersion: 1.91.2 - generationVersion: 2.131.1 + speakeasyVersion: 1.91.3 + generationVersion: 2.139.1 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.88.1 + core: 2.89.1 deprecations: 2.81.1 globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.23.1 + version: 2.24.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index a389d49..2f626bf 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.23.1", + "version": "2.24.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.23.1", + "version": "2.24.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 0603b16..8792b98 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", 
- "version": "2.23.1", + "version": "2.24.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/internal/utils/requestbody.ts b/src/internal/utils/requestbody.ts index f8f4691..f7a1f55 100755 --- a/src/internal/utils/requestbody.ts +++ b/src/internal/utils/requestbody.ts @@ -315,7 +315,7 @@ function encodeMultipartFormDataFile(formData: FormData, file: any): FormData { if (mpFormDecoratorName === "" || fileName === "" || content == null) { throw new Error("invalid multipart/form-data file"); } - formData.append("file", Buffer.from(content), fileName); + formData.append(mpFormDecoratorName, Buffer.from(content), fileName); return formData; } diff --git a/src/sdk/models/shared/createimageeditrequest2.ts b/src/sdk/models/shared/createimageeditrequest.ts similarity index 97% rename from src/sdk/models/shared/createimageeditrequest2.ts rename to src/sdk/models/shared/createimageeditrequest.ts index ce77bbf..4b7728d 100755 --- a/src/sdk/models/shared/createimageeditrequest2.ts +++ b/src/sdk/models/shared/createimageeditrequest.ts @@ -37,7 +37,7 @@ export enum CreateImageEditRequestSize { OneThousandAndTwentyFourx1024 = "1024x1024", } -export class CreateImageEditRequest2 extends SpeakeasyBase { +export class CreateImageEditRequest extends SpeakeasyBase { /** * The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. 
*/ diff --git a/src/sdk/models/shared/createimagevariationrequest2.ts b/src/sdk/models/shared/createimagevariationrequest.ts similarity index 96% rename from src/sdk/models/shared/createimagevariationrequest2.ts rename to src/sdk/models/shared/createimagevariationrequest.ts index a9267a3..45b5b13 100755 --- a/src/sdk/models/shared/createimagevariationrequest2.ts +++ b/src/sdk/models/shared/createimagevariationrequest.ts @@ -29,7 +29,7 @@ export enum CreateImageVariationRequestSize { OneThousandAndTwentyFourx1024 = "1024x1024", } -export class CreateImageVariationRequest2 extends SpeakeasyBase { +export class CreateImageVariationRequest extends SpeakeasyBase { /** * The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. */ diff --git a/src/sdk/models/shared/createtranscriptionrequest1.ts b/src/sdk/models/shared/createtranscriptionrequest.ts similarity index 97% rename from src/sdk/models/shared/createtranscriptionrequest1.ts rename to src/sdk/models/shared/createtranscriptionrequest.ts index 8d6b1a1..61ba8da 100755 --- a/src/sdk/models/shared/createtranscriptionrequest1.ts +++ b/src/sdk/models/shared/createtranscriptionrequest.ts @@ -36,7 +36,7 @@ export enum CreateTranscriptionRequestResponseFormat { Vtt = "vtt", } -export class CreateTranscriptionRequest1 extends SpeakeasyBase { +export class CreateTranscriptionRequest extends SpeakeasyBase { /** * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. 
* diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index 02dca0f..e0ef1fd 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -18,12 +18,12 @@ export * from "./createembeddingresponse"; export * from "./createfilerequest"; export * from "./createfinetunerequest"; export * from "./createfinetuningjobrequest"; -export * from "./createimageeditrequest2"; +export * from "./createimageeditrequest"; export * from "./createimagerequest"; -export * from "./createimagevariationrequest2"; +export * from "./createimagevariationrequest"; export * from "./createmoderationrequest"; export * from "./createmoderationresponse"; -export * from "./createtranscriptionrequest1"; +export * from "./createtranscriptionrequest"; export * from "./createtranscriptionresponse"; export * from "./createtranslationrequest"; export * from "./createtranslationresponse"; diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index ff92665..e7aaa0c 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -51,9 +51,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -127,9 +125,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse 
= await client.request({ validateStatus: () => true, @@ -217,9 +213,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -308,9 +302,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -400,9 +392,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -490,9 +480,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => 
true, @@ -581,9 +569,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -675,9 +661,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -767,9 +751,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -858,9 +840,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -909,11 +889,11 @@ export class OpenAI { * 
Creates an edited or extended image given an original image and a prompt. */ async createImageEdit( - req: shared.CreateImageEditRequest2, + req: shared.CreateImageEditRequest, config?: AxiosRequestConfig ): Promise { if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateImageEditRequest2(req); + req = new shared.CreateImageEditRequest(req); } const baseURL: string = utils.templateUrl( @@ -948,9 +928,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -999,11 +977,11 @@ export class OpenAI { * Creates a variation of a given image. */ async createImageVariation( - req: shared.CreateImageVariationRequest2, + req: shared.CreateImageVariationRequest, config?: AxiosRequestConfig ): Promise { if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateImageVariationRequest2(req); + req = new shared.CreateImageVariationRequest(req); } const baseURL: string = utils.templateUrl( @@ -1038,9 +1016,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1129,9 +1105,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - 
] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1180,11 +1154,11 @@ export class OpenAI { * Transcribes audio into the input language. */ async createTranscription( - req: shared.CreateTranscriptionRequest1, + req: shared.CreateTranscriptionRequest, config?: AxiosRequestConfig ): Promise { if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateTranscriptionRequest1(req); + req = new shared.CreateTranscriptionRequest(req); } const baseURL: string = utils.templateUrl( @@ -1219,9 +1193,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1310,9 +1282,7 @@ export class OpenAI { if (reqBody == null) throw new Error("request body is required"); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1385,9 +1355,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} 
${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1459,9 +1427,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1533,9 +1499,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1597,9 +1561,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1675,9 +1637,7 @@ export class OpenAI { const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = 
`speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1746,9 +1706,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1826,9 +1784,7 @@ export class OpenAI { const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1894,9 +1850,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -1970,9 +1924,7 @@ export class OpenAI { const queryParams: string = utils.serializeQueryParams(req); headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = 
`speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -2045,9 +1997,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -2121,9 +2071,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -2199,9 +2147,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = "application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, @@ -2274,9 +2220,7 @@ export class OpenAI { const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; headers["Accept"] = 
"application/json"; - headers[ - "user-agent" - ] = `speakeasy-sdk/${this.sdkConfiguration.language} ${this.sdkConfiguration.sdkVersion} ${this.sdkConfiguration.genVersion} ${this.sdkConfiguration.openapiDocVersion}`; + headers["user-agent"] = this.sdkConfiguration.userAgent; const httpRes: AxiosResponse = await client.request({ validateStatus: () => true, diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index a1f3cc0..65e37af 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,8 +48,9 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.23.1"; - genVersion = "2.131.1"; + sdkVersion = "2.24.0"; + genVersion = "2.139.1"; + userAgent = "speakeasy-sdk/typescript 2.24.0 2.139.1 2.0.0 @speakeasy-api/openai"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From e8cb790bdbb6ab49e853ff30c1e9e16c25ec33c8 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sun, 1 Oct 2023 01:02:32 +0000 Subject: [PATCH 58/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.92.2 --- RELEASES.md | 12 +++++++++++- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 6 +++--- 5 files changed, 21 insertions(+), 11 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 94e9597..1faec82 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -650,4 +650,14 @@ Based on: ### Generated - [typescript v2.24.0] . ### Releases -- [NPM v2.24.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.24.0 - . \ No newline at end of file +- [NPM v2.24.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.24.0 - . + +## 2023-10-01 01:02:08 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.92.2 (2.142.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.25.0] . 
+### Releases +- [NPM v2.25.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.0 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index d4c1ebd..904ab42 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: b1068f88203a26e2dc0afaa17203e882 docVersion: 2.0.0 - speakeasyVersion: 1.91.3 - generationVersion: 2.139.1 + speakeasyVersion: 1.92.2 + generationVersion: 2.142.2 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.89.1 + core: 2.90.0 deprecations: 2.81.1 globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.24.0 + version: 2.25.0 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 2f626bf..226de5a 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.24.0", + "version": "2.25.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.24.0", + "version": "2.25.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 8792b98..09d10ef 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.24.0", + "version": "2.25.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 65e37af..4df4702 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,9 +48,9 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.24.0"; - genVersion = "2.139.1"; - userAgent = "speakeasy-sdk/typescript 2.24.0 2.139.1 2.0.0 @speakeasy-api/openai"; + sdkVersion = "2.25.0"; + genVersion = "2.142.2"; + userAgent = "speakeasy-sdk/typescript 2.25.0 2.142.2 2.0.0 
@speakeasy-api/openai"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From b3824643fad1bfa51255dca9a40763ea13709754 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Mon, 2 Oct 2023 00:56:10 +0000 Subject: [PATCH 59/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.92.3 --- RELEASES.md | 12 +++++++++++- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/sdk.ts | 6 +++--- 5 files changed, 21 insertions(+), 11 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 1faec82..a417e12 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -660,4 +660,14 @@ Based on: ### Generated - [typescript v2.25.0] . ### Releases -- [NPM v2.25.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.0 - . \ No newline at end of file +- [NPM v2.25.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.0 - . + +## 2023-10-02 00:55:44 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.92.3 (2.143.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.25.1] . +### Releases +- [NPM v2.25.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.1 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 904ab42..5876be7 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: b1068f88203a26e2dc0afaa17203e882 docVersion: 2.0.0 - speakeasyVersion: 1.92.2 - generationVersion: 2.142.2 + speakeasyVersion: 1.92.3 + generationVersion: 2.143.2 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.90.0 + core: 2.90.1 deprecations: 2.81.1 globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.25.0 + version: 2.25.1 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 226de5a..76c9bee 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.25.0", + "version": "2.25.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.25.0", + "version": "2.25.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 09d10ef..7173521 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.25.0", + "version": "2.25.1", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 4df4702..2375576 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,9 +48,9 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.25.0"; - genVersion = "2.142.2"; - userAgent = "speakeasy-sdk/typescript 2.25.0 2.142.2 2.0.0 @speakeasy-api/openai"; + sdkVersion = "2.25.1"; + genVersion = "2.143.2"; + userAgent = "speakeasy-sdk/typescript 2.25.1 2.143.2 2.0.0 @speakeasy-api/openai"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { 
Object.assign(this, init); From 8d500ecdf75ddf4246cbca699e02d5da91e1f9e8 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 3 Oct 2023 00:56:15 +0000 Subject: [PATCH 60/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.93.1 --- README.md | 2 +- RELEASES.md | 12 +++++++++++- .../shared/chatcompletionfunctioncalloption.md | 3 ++- .../models/shared/createchatcompletionrequest.md | 8 ++++---- .../createchatcompletionrequestfunctioncall1.md | 3 ++- docs/models/shared/createcompletionrequest.md | 4 ++-- docs/models/shared/createembeddingrequest.md | 10 +++++----- docs/models/shared/createfilerequest.md | 8 ++++---- docs/models/shared/finetuneevent.md | 6 +++++- docs/models/shared/finetuningjobevent.md | 2 ++ docs/sdks/openai/README.md | 4 ++-- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- .../shared/chatcompletionfunctioncalloption.ts | 5 ++++- .../models/shared/createchatcompletionrequest.ts | 16 +++++++++++----- src/sdk/models/shared/createcompletionrequest.ts | 4 ++-- src/sdk/models/shared/createembeddingrequest.ts | 2 +- src/sdk/models/shared/createfilerequest.ts | 6 +++--- src/sdk/models/shared/finetuneevent.ts | 5 +++++ src/sdk/models/shared/finetuningjobevent.ts | 3 +++ src/sdk/openai.ts | 2 +- src/sdk/sdk.ts | 6 +++--- 23 files changed, 80 insertions(+), 45 deletions(-) diff --git a/README.md b/README.md index 4288195..8968b2a 100755 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ sdk.openAI.cancelFineTune({ * [createCompletion](docs/sdks/openai/README.md#createcompletion) - Creates a completion for the provided prompt and parameters. * [~~createEdit~~](docs/sdks/openai/README.md#createedit) - Creates a new edit for the provided input, instruction, and parameters. :warning: **Deprecated** * [createEmbedding](docs/sdks/openai/README.md#createembedding) - Creates an embedding vector representing the input text. 
-* [createFile](docs/sdks/openai/README.md#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. +* [createFile](docs/sdks/openai/README.md#createfile) - Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. * [~~createFineTune~~](docs/sdks/openai/README.md#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. diff --git a/RELEASES.md b/RELEASES.md index a417e12..13307d4 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -670,4 +670,14 @@ Based on: ### Generated - [typescript v2.25.1] . ### Releases -- [NPM v2.25.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.1 - . \ No newline at end of file +- [NPM v2.25.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.1 - . + +## 2023-10-03 00:55:51 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.93.1 (2.144.7) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.25.2] . +### Releases +- [NPM v2.25.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.2 - . \ No newline at end of file diff --git a/docs/models/shared/chatcompletionfunctioncalloption.md b/docs/models/shared/chatcompletionfunctioncalloption.md index b4f28c6..f2670cf 100755 --- a/docs/models/shared/chatcompletionfunctioncalloption.md +++ b/docs/models/shared/chatcompletionfunctioncalloption.md @@ -1,6 +1,7 @@ # ChatCompletionFunctionCallOption -Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. 
`auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. +Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + ## Fields diff --git a/docs/models/shared/createchatcompletionrequest.md b/docs/models/shared/createchatcompletionrequest.md index 38ddc2d..021377b 100755 --- a/docs/models/shared/createchatcompletionrequest.md +++ b/docs/models/shared/createchatcompletionrequest.md @@ -6,16 +6,16 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `frequencyPenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
| | -| `functionCall` | *any* | :heavy_minus_sign: | Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. | | +| `functionCall` | *any* | :heavy_minus_sign: | Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present.
| | | `functions` | [ChatCompletionFunctions](../../models/shared/chatcompletionfunctions.md)[] | :heavy_minus_sign: | A list of functions the model may generate JSON inputs for. | | | `logitBias` | Record | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
| | -| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the chat completion.

The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| | -| `messages` | [ChatCompletionRequestMessage](../../models/shared/chatcompletionrequestmessage.md)[] | :heavy_check_mark: | A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). | | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the chat completion.

The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
| | +| `messages` | [ChatCompletionRequestMessage](../../models/shared/chatcompletionrequestmessage.md)[] | :heavy_check_mark: | A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). | | | `model` | *any* | :heavy_check_mark: | ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. | | | `n` | *number* | :heavy_minus_sign: | How many chat completion choices to generate for each input message. | 1 | | `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
| | | `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens.
| | -| `stream` | *boolean* | :heavy_minus_sign: | If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
| | +| `stream` | *boolean* | :heavy_minus_sign: | If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
| | | `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

We generally recommend altering this or `top_p` but not both.
| 1 | | `topP` | *number* | :heavy_minus_sign: | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.

We generally recommend altering this or `temperature` but not both.
| 1 | | `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequestfunctioncall1.md b/docs/models/shared/createchatcompletionrequestfunctioncall1.md index c6ee534..5570881 100755 --- a/docs/models/shared/createchatcompletionrequestfunctioncall1.md +++ b/docs/models/shared/createchatcompletionrequestfunctioncall1.md @@ -1,6 +1,7 @@ # CreateChatCompletionRequestFunctionCall1 -Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. +Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + ## Values diff --git a/docs/models/shared/createcompletionrequest.md b/docs/models/shared/createcompletionrequest.md index 9080a5e..eeb4f37 100755 --- a/docs/models/shared/createcompletionrequest.md +++ b/docs/models/shared/createcompletionrequest.md @@ -10,13 +10,13 @@ | `frequencyPenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
| | | `logitBias` | Record | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.

As an example, you can pass `{"50256": -100}` to prevent the <\|endoftext\|> token from being generated.
| | | `logprobs` | *number* | :heavy_minus_sign: | Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5.
| | -| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the completion.

The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| 16 | +| `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the completion.

The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
| 16 | | `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| | | `n` | *number* | :heavy_minus_sign: | How many completions to generate for each prompt.

**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
| 1 | | `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
| | | `prompt` | *any* | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.

Note that <\|endoftext\|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
| | | `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
| | -| `stream` | *boolean* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
| | +| `stream` | *boolean* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
| | | `suffix` | *string* | :heavy_minus_sign: | The suffix that comes after a completion of inserted text. | test. | | `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

We generally recommend altering this or `top_p` but not both.
| 1 | | `topP` | *number* | :heavy_minus_sign: | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.

We generally recommend altering this or `temperature` but not both.
| 1 | diff --git a/docs/models/shared/createembeddingrequest.md b/docs/models/shared/createembeddingrequest.md index d0110d9..c4243a1 100755 --- a/docs/models/shared/createembeddingrequest.md +++ b/docs/models/shared/createembeddingrequest.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `input` | *any* | :heavy_check_mark: | Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
| | -| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| | -| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input` | *any* | :heavy_check_mark: | Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
| | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| | +| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createfilerequest.md b/docs/models/shared/createfilerequest.md index e9f4d06..b31af0f 100755 --- a/docs/models/shared/createfilerequest.md +++ b/docs/models/shared/createfilerequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `file` | [CreateFileRequestFile](../../models/shared/createfilerequestfile.md) | :heavy_check_mark: | Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded.

If the `purpose` is set to "fine-tune", the file will be used for fine-tuning.
| -| `purpose` | *string* | :heavy_check_mark: | The intended purpose of the uploaded documents.

Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file.
| \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [CreateFileRequestFile](../../models/shared/createfilerequestfile.md) | :heavy_check_mark: | The file object (not file name) to be uploaded.

If the `purpose` is set to "fine-tune", the file will be used for fine-tuning.
| +| `purpose` | *string* | :heavy_check_mark: | The intended purpose of the uploaded file.

Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file is correct for fine-tuning.
| \ No newline at end of file diff --git a/docs/models/shared/finetuneevent.md b/docs/models/shared/finetuneevent.md index 57ab0e6..3ae123c 100755 --- a/docs/models/shared/finetuneevent.md +++ b/docs/models/shared/finetuneevent.md @@ -1,4 +1,8 @@ -# FineTuneEvent +# ~~FineTuneEvent~~ + +Fine-tune event object + +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. ## Fields diff --git a/docs/models/shared/finetuningjobevent.md b/docs/models/shared/finetuningjobevent.md index 096aed7..5966bfa 100755 --- a/docs/models/shared/finetuningjobevent.md +++ b/docs/models/shared/finetuningjobevent.md @@ -1,5 +1,7 @@ # FineTuningJobEvent +Fine-tuning job event object + ## Fields diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index 76f0bee..3901455 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -15,7 +15,7 @@ The OpenAI REST API * [createCompletion](#createcompletion) - Creates a completion for the provided prompt and parameters. * [~~createEdit~~](#createedit) - Creates a new edit for the provided input, instruction, and parameters. :warning: **Deprecated** * [createEmbedding](#createembedding) - Creates an embedding vector representing the input text. -* [createFile](#createfile) - Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. +* [createFile](#createfile) - Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. * [~~createFineTune~~](#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. 
@@ -370,7 +370,7 @@ sdk.openAI.createEmbedding({ ## createFile -Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. +Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. ### Example Usage diff --git a/gen.yaml b/gen.yaml index 5876be7..bf1f74f 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: b1068f88203a26e2dc0afaa17203e882 + docChecksum: 11e459ef36cd22c19855de8f048393af docVersion: 2.0.0 - speakeasyVersion: 1.92.3 - generationVersion: 2.143.2 + speakeasyVersion: 1.93.1 + generationVersion: 2.144.7 generation: sdkClassName: gpt sdkFlattening: true @@ -16,7 +16,7 @@ features: globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.25.1 + version: 2.25.2 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 76c9bee..3e31110 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.25.1", + "version": "2.25.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.25.1", + "version": "2.25.2", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 7173521..616fa7b 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.25.1", + "version": "2.25.2", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/chatcompletionfunctioncalloption.ts 
b/src/sdk/models/shared/chatcompletionfunctioncalloption.ts index c7694b0..29ef817 100755 --- a/src/sdk/models/shared/chatcompletionfunctioncalloption.ts +++ b/src/sdk/models/shared/chatcompletionfunctioncalloption.ts @@ -6,7 +6,10 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; /** - * Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. + * Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + * + * @remarks + * */ export class ChatCompletionFunctionCallOption extends SpeakeasyBase { /** diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts index fc4f3bb..58f3713 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -8,7 +8,10 @@ import { ChatCompletionRequestMessage } from "./chatcompletionrequestmessage"; import { Expose, Type } from "class-transformer"; /** - * Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. 
Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. + * Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + * + * @remarks + * */ export enum CreateChatCompletionRequestFunctionCall1 { None = "none", @@ -46,7 +49,10 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { frequencyPenalty?: number; /** - * Controls how the model responds to function calls. `none` means the model does not call a function, and responds to the end-user. `auto` means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. + * Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + * + * @remarks + * */ @SpeakeasyMetadata() @Expose({ name: "function_call" }) @@ -77,7 +83,7 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { * * @remarks * - * The total length of input tokens and generated tokens is limited by the model's context length. 
[Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. + * The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. * */ @SpeakeasyMetadata() @@ -85,7 +91,7 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { maxTokens?: number; /** - * A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb). + * A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). */ @SpeakeasyMetadata({ elemType: ChatCompletionRequestMessage }) @Expose({ name: "messages" }) @@ -129,7 +135,7 @@ export class CreateChatCompletionRequest extends SpeakeasyBase { stop?: any; /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
* * @remarks * diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts index 2ca9fa2..e7c54e8 100755 --- a/src/sdk/models/shared/createcompletionrequest.ts +++ b/src/sdk/models/shared/createcompletionrequest.ts @@ -92,7 +92,7 @@ export class CreateCompletionRequest extends SpeakeasyBase { * * @remarks * - * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. + * The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. * */ @SpeakeasyMetadata() @@ -156,7 +156,7 @@ export class CreateCompletionRequest extends SpeakeasyBase { stop?: any; /** - * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb). + * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
* * @remarks * diff --git a/src/sdk/models/shared/createembeddingrequest.ts b/src/sdk/models/shared/createembeddingrequest.ts index 51b00fe..11dcdf9 100755 --- a/src/sdk/models/shared/createembeddingrequest.ts +++ b/src/sdk/models/shared/createembeddingrequest.ts @@ -17,7 +17,7 @@ export enum CreateEmbeddingRequestModel2 { export class CreateEmbeddingRequest extends SpeakeasyBase { /** - * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens. + * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. * * @remarks * diff --git a/src/sdk/models/shared/createfilerequest.ts b/src/sdk/models/shared/createfilerequest.ts index f4f411e..eb2eec1 100755 --- a/src/sdk/models/shared/createfilerequest.ts +++ b/src/sdk/models/shared/createfilerequest.ts @@ -14,7 +14,7 @@ export class CreateFileRequestFile extends SpeakeasyBase { export class CreateFileRequest extends SpeakeasyBase { /** - * Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. + * The file object (not file name) to be uploaded. * * @remarks * @@ -25,11 +25,11 @@ export class CreateFileRequest extends SpeakeasyBase { file: CreateFileRequestFile; /** - * The intended purpose of the uploaded documents. + * The intended purpose of the uploaded file. 
* * @remarks * - * Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file. + * Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file is correct for fine-tuning. * */ @SpeakeasyMetadata({ data: "multipart_form, name=purpose" }) diff --git a/src/sdk/models/shared/finetuneevent.ts b/src/sdk/models/shared/finetuneevent.ts index 94b7efd..dffe08c 100755 --- a/src/sdk/models/shared/finetuneevent.ts +++ b/src/sdk/models/shared/finetuneevent.ts @@ -5,6 +5,11 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; +/** + * Fine-tune event object + * + * @deprecated class: This will be removed in a future release, please migrate away from it as soon as possible. + */ export class FineTuneEvent extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "created_at" }) diff --git a/src/sdk/models/shared/finetuningjobevent.ts b/src/sdk/models/shared/finetuningjobevent.ts index 43f4a41..d669c4c 100755 --- a/src/sdk/models/shared/finetuningjobevent.ts +++ b/src/sdk/models/shared/finetuningjobevent.ts @@ -11,6 +11,9 @@ export enum FineTuningJobEventLevel { Error = "error", } +/** + * Fine-tuning job event object + */ export class FineTuningJobEvent extends SpeakeasyBase { @SpeakeasyMetadata() @Expose({ name: "created_at" }) diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index e7aaa0c..308944d 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -526,7 +526,7 @@ export class OpenAI { } /** - * Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. + * Upload a file that can be used across various endpoints/features. 
Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. * */ async createFile( diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 2375576..73238ed 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,9 +48,9 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.25.1"; - genVersion = "2.143.2"; - userAgent = "speakeasy-sdk/typescript 2.25.1 2.143.2 2.0.0 @speakeasy-api/openai"; + sdkVersion = "2.25.2"; + genVersion = "2.144.7"; + userAgent = "speakeasy-sdk/typescript 2.25.2 2.144.7 2.0.0 @speakeasy-api/openai"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From b7c41fa06479be86f16dd70814487b8dc5468298 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 5 Oct 2023 00:55:50 +0000 Subject: [PATCH 61/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeasy CLI 1.94.0 --- RELEASES.md | 12 +++++++++++- gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/internal/utils/utils.ts | 2 +- src/sdk/sdk.ts | 6 +++--- 6 files changed, 22 insertions(+), 12 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 13307d4..ca3789c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -680,4 +680,14 @@ Based on: ### Generated - [typescript v2.25.2] . ### Releases -- [NPM v2.25.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.2 - . \ No newline at end of file +- [NPM v2.25.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.2 - . + +## 2023-10-05 00:55:18 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.94.0 (2.147.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.25.3] . +### Releases +- [NPM v2.25.3] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.3 - . 
\ No newline at end of file diff --git a/gen.yaml b/gen.yaml index bf1f74f..52089d5 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: 11e459ef36cd22c19855de8f048393af docVersion: 2.0.0 - speakeasyVersion: 1.93.1 - generationVersion: 2.144.7 + speakeasyVersion: 1.94.0 + generationVersion: 2.147.0 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.90.1 + core: 2.90.3 deprecations: 2.81.1 globalSecurity: 2.81.1 globalServerURLs: 2.82.0 typescript: - version: 2.25.2 + version: 2.25.3 author: speakeasy-openai maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/package-lock.json b/package-lock.json index 3e31110..276d8a0 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.25.2", + "version": "2.25.3", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.25.2", + "version": "2.25.3", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 616fa7b..bf345f4 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.25.2", + "version": "2.25.3", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/internal/utils/utils.ts b/src/internal/utils/utils.ts index dc6b289..03daf88 100755 --- a/src/internal/utils/utils.ts +++ b/src/internal/utils/utils.ts @@ -81,7 +81,7 @@ export class SpeakeasyBase { for (const prop of props) { if (payload && payload.hasOwnProperty(prop.key)) { const value = payload[prop.key]; - if (isSpeakeasyBase(prop.type)) { + if (isSpeakeasyBase(prop.type) && value != null) { (this as any)[prop.key] = new prop.type(value); } else if ( prop.type.name == "Array" && diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 73238ed..5cf6fbd 100755 --- 
a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -48,9 +48,9 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.25.2"; - genVersion = "2.144.7"; - userAgent = "speakeasy-sdk/typescript 2.25.2 2.144.7 2.0.0 @speakeasy-api/openai"; + sdkVersion = "2.25.3"; + genVersion = "2.147.0"; + userAgent = "speakeasy-sdk/typescript 2.25.3 2.147.0 2.0.0 @speakeasy-api/openai"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From 266d2e9578e877aa76f7a94c03e852e6fa5a45e2 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 7 Oct 2023 00:54:57 +0000 Subject: [PATCH 62/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeasy CLI 1.96.1 --- README.md | 21 +- RELEASES.md | 12 +- USAGE.md | 21 +- .../chatcompletionfunctioncalloption.md | 11 - .../shared/createchatcompletionrequest.md | 2 +- ...reatechatcompletionrequestfunctioncall1.md | 12 - .../createchatcompletionrequestmodel2.md | 20 - .../shared/createcompletionrequestmodel2.md | 20 - docs/models/shared/createeditrequest.md | 2 +- docs/models/shared/createeditrequestmodel2.md | 11 - docs/models/shared/createembeddingrequest.md | 10 - .../shared/createembeddingrequestmodel2.md | 11 - docs/models/shared/createfilerequest.md | 9 - docs/models/shared/createfilerequestfile.md | 9 - docs/models/shared/createfinetunerequest.md | 4 +- .../shared/createfinetunerequestmodel2.md | 17 - .../shared/createfinetuningjobrequest.md | 2 +- ...tuningjobrequesthyperparametersnepochs1.md | 12 - .../createfinetuningjobrequestmodel2.md | 14 - docs/models/shared/createmoderationrequest.md | 8 +- .../shared/createmoderationrequestmodel2.md | 14 - .../shared/createtranscriptionrequest.md | 13 - .../shared/createtranscriptionrequestfile.md | 9 - .../createtranscriptionrequestmodel2.md | 11 - ...reatetranscriptionrequestresponseformat.md | 15 - .../models/shared/createtranslationrequest.md | 12 - .../shared/createtranslationrequestfile.md | 9 - 
.../shared/createtranslationrequestmodel2.md | 11 - docs/models/shared/finetuningjob.md | 32 +- .../finetuningjobhyperparametersnepochs1.md | 11 - docs/sdks/openai/README.md | 890 +++++++++--------- files.gen | 26 - gen.yaml | 11 +- jest.config.js | 2 +- package-lock.json | 4 +- package.json | 2 +- .../chatcompletionfunctioncalloption.ts | 21 - .../shared/createchatcompletionrequest.ts | 28 - .../models/shared/createcompletionrequest.ts | 19 - src/sdk/models/shared/createeditrequest.ts | 8 - .../models/shared/createembeddingrequest.ts | 48 - src/sdk/models/shared/createfilerequest.ts | 37 - .../models/shared/createfinetunerequest.ts | 16 - .../shared/createfinetuningjobrequest.ts | 24 - .../models/shared/createmoderationrequest.ts | 13 - .../shared/createtranscriptionrequest.ts | 93 -- .../models/shared/createtranslationrequest.ts | 70 -- src/sdk/models/shared/finetuningjob.ts | 10 - src/sdk/models/shared/index.ts | 5 - src/sdk/openai.ts | 24 +- src/sdk/sdk.ts | 7 +- 51 files changed, 499 insertions(+), 1224 deletions(-) delete mode 100755 docs/models/shared/chatcompletionfunctioncalloption.md delete mode 100755 docs/models/shared/createchatcompletionrequestfunctioncall1.md delete mode 100755 docs/models/shared/createchatcompletionrequestmodel2.md delete mode 100755 docs/models/shared/createcompletionrequestmodel2.md delete mode 100755 docs/models/shared/createeditrequestmodel2.md delete mode 100755 docs/models/shared/createembeddingrequest.md delete mode 100755 docs/models/shared/createembeddingrequestmodel2.md delete mode 100755 docs/models/shared/createfilerequest.md delete mode 100755 docs/models/shared/createfilerequestfile.md delete mode 100755 docs/models/shared/createfinetunerequestmodel2.md delete mode 100755 docs/models/shared/createfinetuningjobrequesthyperparametersnepochs1.md delete mode 100755 docs/models/shared/createfinetuningjobrequestmodel2.md delete mode 100755 docs/models/shared/createmoderationrequestmodel2.md delete mode 100755 
docs/models/shared/createtranscriptionrequest.md delete mode 100755 docs/models/shared/createtranscriptionrequestfile.md delete mode 100755 docs/models/shared/createtranscriptionrequestmodel2.md delete mode 100755 docs/models/shared/createtranscriptionrequestresponseformat.md delete mode 100755 docs/models/shared/createtranslationrequest.md delete mode 100755 docs/models/shared/createtranslationrequestfile.md delete mode 100755 docs/models/shared/createtranslationrequestmodel2.md delete mode 100755 docs/models/shared/finetuningjobhyperparametersnepochs1.md delete mode 100755 src/sdk/models/shared/chatcompletionfunctioncalloption.ts delete mode 100755 src/sdk/models/shared/createembeddingrequest.ts delete mode 100755 src/sdk/models/shared/createfilerequest.ts delete mode 100755 src/sdk/models/shared/createtranscriptionrequest.ts delete mode 100755 src/sdk/models/shared/createtranslationrequest.ts diff --git a/README.md b/README.md index 8968b2a..62d796a 100755 --- a/README.md +++ b/README.md @@ -41,21 +41,22 @@ Authorization: Bearer YOUR_API_KEY ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.cancelFineTune({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); -sdk.openAI.cancelFineTune({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", -}).then((res: CancelFineTuneResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` diff --git a/RELEASES.md b/RELEASES.md index ca3789c..fd23472 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -690,4 +690,14 @@ Based on: ### Generated - [typescript v2.25.3] . ### Releases -- [NPM v2.25.3] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.3 - . 
\ No newline at end of file +- [NPM v2.25.3] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.25.3 - . + +## 2023-10-07 00:54:33 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.96.1 (2.150.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.26.0] . +### Releases +- [NPM v2.26.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.26.0 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index 37c1178..f4ab472 100755 --- a/USAGE.md +++ b/USAGE.md @@ -3,20 +3,21 @@ ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.cancelFineTune({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); -sdk.openAI.cancelFineTune({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", -}).then((res: CancelFineTuneResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` \ No newline at end of file diff --git a/docs/models/shared/chatcompletionfunctioncalloption.md b/docs/models/shared/chatcompletionfunctioncalloption.md deleted file mode 100755 index f2670cf..0000000 --- a/docs/models/shared/chatcompletionfunctioncalloption.md +++ /dev/null @@ -1,11 +0,0 @@ -# ChatCompletionFunctionCallOption - -Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. 
- - - -## Fields - -| Field | Type | Required | Description | -| --------------------------------- | --------------------------------- | --------------------------------- | --------------------------------- | -| `name` | *string* | :heavy_check_mark: | The name of the function to call. | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequest.md b/docs/models/shared/createchatcompletionrequest.md index 021377b..71a80c1 100755 --- a/docs/models/shared/createchatcompletionrequest.md +++ b/docs/models/shared/createchatcompletionrequest.md @@ -11,7 +11,7 @@ | `logitBias` | Record | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
| | | `maxTokens` | *number* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) to generate in the chat completion.

The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
| | | `messages` | [ChatCompletionRequestMessage](../../models/shared/chatcompletionrequestmessage.md)[] | :heavy_check_mark: | A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). | | -| `model` | *any* | :heavy_check_mark: | ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. | | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. | gpt-3.5-turbo | | `n` | *number* | :heavy_minus_sign: | How many chat completion choices to generate for each input message. | 1 | | `presencePenalty` | *number* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details)
| | | `stop` | *any* | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens.
| | diff --git a/docs/models/shared/createchatcompletionrequestfunctioncall1.md b/docs/models/shared/createchatcompletionrequestfunctioncall1.md deleted file mode 100755 index 5570881..0000000 --- a/docs/models/shared/createchatcompletionrequestfunctioncall1.md +++ /dev/null @@ -1,12 +0,0 @@ -# CreateChatCompletionRequestFunctionCall1 - -Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. - - - -## Values - -| Name | Value | -| ------ | ------ | -| `None` | none | -| `Auto` | auto | \ No newline at end of file diff --git a/docs/models/shared/createchatcompletionrequestmodel2.md b/docs/models/shared/createchatcompletionrequestmodel2.md deleted file mode 100755 index 20cd4e4..0000000 --- a/docs/models/shared/createchatcompletionrequestmodel2.md +++ /dev/null @@ -1,20 +0,0 @@ -# CreateChatCompletionRequestModel2 - -ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. 
- - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `Gpt4` | gpt-4 | -| `Gpt40314` | gpt-4-0314 | -| `Gpt40613` | gpt-4-0613 | -| `Gpt432k` | gpt-4-32k | -| `Gpt432k0314` | gpt-4-32k-0314 | -| `Gpt432k0613` | gpt-4-32k-0613 | -| `Gpt35Turbo` | gpt-3.5-turbo | -| `Gpt35Turbo16k` | gpt-3.5-turbo-16k | -| `Gpt35Turbo0301` | gpt-3.5-turbo-0301 | -| `Gpt35Turbo0613` | gpt-3.5-turbo-0613 | -| `Gpt35Turbo16k0613` | gpt-3.5-turbo-16k-0613 | \ No newline at end of file diff --git a/docs/models/shared/createcompletionrequestmodel2.md b/docs/models/shared/createcompletionrequestmodel2.md deleted file mode 100755 index 91d4fa8..0000000 --- a/docs/models/shared/createcompletionrequestmodel2.md +++ /dev/null @@ -1,20 +0,0 @@ -# CreateCompletionRequestModel2 - -ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `Babbage002` | babbage-002 | -| `Davinci002` | davinci-002 | -| `Gpt35TurboInstruct` | gpt-3.5-turbo-instruct | -| `TextDavinci003` | text-davinci-003 | -| `TextDavinci002` | text-davinci-002 | -| `TextDavinci001` | text-davinci-001 | -| `CodeDavinci002` | code-davinci-002 | -| `TextCurie001` | text-curie-001 | -| `TextBabbage001` | text-babbage-001 | -| `TextAda001` | text-ada-001 | \ No newline at end of file diff --git a/docs/models/shared/createeditrequest.md b/docs/models/shared/createeditrequest.md index 21bc370..99ba939 100755 --- a/docs/models/shared/createeditrequest.md +++ b/docs/models/shared/createeditrequest.md @@ -7,7 +7,7 @@ | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `input` | *string* | :heavy_minus_sign: | The input text to use as a starting point for the edit. | What day of the wek is it? | | `instruction` | *string* | :heavy_check_mark: | The instruction that tells the model how to edit the prompt. | Fix the spelling mistakes. | -| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. 
| | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. | text-davinci-edit-001 | | `n` | *number* | :heavy_minus_sign: | How many edits to generate for the input and instruction. | 1 | | `temperature` | *number* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

We generally recommend altering this or `top_p` but not both.
| 1 | | `topP` | *number* | :heavy_minus_sign: | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.

We generally recommend altering this or `temperature` but not both.
| 1 | \ No newline at end of file diff --git a/docs/models/shared/createeditrequestmodel2.md b/docs/models/shared/createeditrequestmodel2.md deleted file mode 100755 index 9234d1e..0000000 --- a/docs/models/shared/createeditrequestmodel2.md +++ /dev/null @@ -1,11 +0,0 @@ -# CreateEditRequestModel2 - -ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. - - -## Values - -| Name | Value | -| --------------------- | --------------------- | -| `TextDavinciEdit001` | text-davinci-edit-001 | -| `CodeDavinciEdit001` | code-davinci-edit-001 | \ No newline at end of file diff --git a/docs/models/shared/createembeddingrequest.md b/docs/models/shared/createembeddingrequest.md deleted file mode 100755 index c4243a1..0000000 --- a/docs/models/shared/createembeddingrequest.md +++ /dev/null @@ -1,10 +0,0 @@ -# CreateEmbeddingRequest - - -## Fields - -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `input` | *any* | :heavy_check_mark: | Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
| | -| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| | -| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createembeddingrequestmodel2.md b/docs/models/shared/createembeddingrequestmodel2.md deleted file mode 100755 index 2fa004a..0000000 --- a/docs/models/shared/createembeddingrequestmodel2.md +++ /dev/null @@ -1,11 +0,0 @@ -# CreateEmbeddingRequestModel2 - -ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `TextEmbeddingAda002` | text-embedding-ada-002 | \ No newline at end of file diff --git a/docs/models/shared/createfilerequest.md b/docs/models/shared/createfilerequest.md deleted file mode 100755 index b31af0f..0000000 --- a/docs/models/shared/createfilerequest.md +++ /dev/null @@ -1,9 +0,0 @@ -# CreateFileRequest - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `file` | [CreateFileRequestFile](../../models/shared/createfilerequestfile.md) | :heavy_check_mark: | The file object (not file name) to be uploaded.

If the `purpose` is set to "fine-tune", the file will be used for fine-tuning.
| -| `purpose` | *string* | :heavy_check_mark: | The intended purpose of the uploaded file.

Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file is correct for fine-tuning.
| \ No newline at end of file diff --git a/docs/models/shared/createfilerequestfile.md b/docs/models/shared/createfilerequestfile.md deleted file mode 100755 index 3bb80b5..0000000 --- a/docs/models/shared/createfilerequestfile.md +++ /dev/null @@ -1,9 +0,0 @@ -# CreateFileRequestFile - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `content` | *Uint8Array* | :heavy_check_mark: | N/A | -| `file` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createfinetunerequest.md b/docs/models/shared/createfinetunerequest.md index 7a8838e..df76c1b 100755 --- a/docs/models/shared/createfinetunerequest.md +++ b/docs/models/shared/createfinetunerequest.md @@ -6,12 +6,12 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `batchSize` | *number* | :heavy_minus_sign: | The batch size to use for training. The batch size is the number of
training examples used to train a single forward and backward pass.

By default, the batch size will be dynamically configured to be
~0.2% of the number of examples in the training set, capped at 256 -
in general, we've found that larger batch sizes tend to work better
for larger datasets.
| | -| `classificationBetas` | *number*[] | :heavy_minus_sign: | If this is provided, we calculate F-beta scores at the specified
beta values. The F-beta score is a generalization of F-1 score.
This is only used for binary classification.

With a beta of 1 (i.e. the F-1 score), precision and recall are
given the same weight. A larger beta score puts more weight on
recall and less on precision. A smaller beta score puts more weight
on precision and less on recall.
| | +| `classificationBetas` | *number*[] | :heavy_minus_sign: | If this is provided, we calculate F-beta scores at the specified
beta values. The F-beta score is a generalization of the F-1 score.
This is only used for binary classification.

With a beta of 1 (i.e. the F-1 score), precision and recall are
given the same weight. A larger beta score puts more weight on
recall and less on precision. A smaller beta score puts more weight
on precision and less on recall.
| 0.6,1,1.5,2 | | `classificationNClasses` | *number* | :heavy_minus_sign: | The number of classes in a classification task.

This parameter is required for multiclass classification.
| | | `classificationPositiveClass` | *string* | :heavy_minus_sign: | The positive class in binary classification.

This parameter is needed to generate precision, recall, and F1
metrics when doing binary classification.
| | | `computeClassificationMetrics` | *boolean* | :heavy_minus_sign: | If set, we calculate classification-specific metrics such as accuracy
and F-1 score using the validation set at the end of every epoch.
These metrics can be viewed in the [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).

In order to compute classification metrics, you must provide a
`validation_file`. Additionally, you must
specify `classification_n_classes` for multiclass classification or
`classification_positive_class` for binary classification.
| | | `learningRateMultiplier` | *number* | :heavy_minus_sign: | The learning rate multiplier to use for training.
The fine-tuning learning rate is the original learning rate used for
pretraining multiplied by this value.

By default, the learning rate multiplier is 0.05, 0.1, or 0.2
depending on final `batch_size` (larger learning rates tend to
perform better with larger batch sizes). We recommend experimenting
with values in the range 0.02 to 0.2 to see what produces the best
results.
| | -| `model` | *any* | :heavy_minus_sign: | The name of the base model to fine-tune. You can select one of "ada",
"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22.
To learn more about these models, see the
[Models](/docs/models) documentation.
| | +| `model` | *any* | :heavy_minus_sign: | The name of the base model to fine-tune. You can select one of "ada",
"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22.
To learn more about these models, see the
[Models](/docs/models) documentation.
| curie | | `nEpochs` | *number* | :heavy_minus_sign: | The number of epochs to train the model for. An epoch refers to one
full cycle through the training dataset.
| | | `promptLossWeight` | *number* | :heavy_minus_sign: | The weight to use for loss on the prompt tokens. This controls how
much the model tries to learn to generate the prompt (as compared
to the completion which always has a weight of 1.0), and can add
a stabilizing effect to training when completions are short.

If prompts are extremely long (relative to completions), it may make
sense to reduce this weight so as to avoid over-prioritizing
learning the prompt.
| | | `suffix` | *string* | :heavy_minus_sign: | A string of up to 40 characters that will be added to your fine-tuned model name.

For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
| | diff --git a/docs/models/shared/createfinetunerequestmodel2.md b/docs/models/shared/createfinetunerequestmodel2.md deleted file mode 100755 index 61cae6a..0000000 --- a/docs/models/shared/createfinetunerequestmodel2.md +++ /dev/null @@ -1,17 +0,0 @@ -# CreateFineTuneRequestModel2 - -The name of the base model to fine-tune. You can select one of "ada", -"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. -To learn more about these models, see the -[Models](/docs/models) documentation. - - - -## Values - -| Name | Value | -| --------- | --------- | -| `Ada` | ada | -| `Babbage` | babbage | -| `Curie` | curie | -| `Davinci` | davinci | \ No newline at end of file diff --git a/docs/models/shared/createfinetuningjobrequest.md b/docs/models/shared/createfinetuningjobrequest.md index 04bd699..12f889a 100755 --- a/docs/models/shared/createfinetuningjobrequest.md +++ b/docs/models/shared/createfinetuningjobrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `hyperparameters` | 
[CreateFineTuningJobRequestHyperparameters](../../models/shared/createfinetuningjobrequesthyperparameters.md) | :heavy_minus_sign: | The hyperparameters used for the fine-tuning job. | | -| `model` | *any* | :heavy_check_mark: | The name of the model to fine-tune. You can select one of the
[supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
| | +| `model` | *any* | :heavy_check_mark: | The name of the model to fine-tune. You can select one of the
[supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
| gpt-3.5-turbo | | `suffix` | *string* | :heavy_minus_sign: | A string of up to 18 characters that will be added to your fine-tuned model name.

For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
| | | `trainingFile` | *string* | :heavy_check_mark: | The ID of an uploaded file that contains training data.

See [upload file](/docs/api-reference/files/upload) for how to upload a file.

Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
| file-abc123 | | `validationFile` | *string* | :heavy_minus_sign: | The ID of an uploaded file that contains validation data.

If you provide this file, the data is used to generate validation
metrics periodically during fine-tuning. These metrics can be viewed in
the fine-tuning results file.
The same data should not be present in both train and validation files.

Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
| file-abc123 | \ No newline at end of file diff --git a/docs/models/shared/createfinetuningjobrequesthyperparametersnepochs1.md b/docs/models/shared/createfinetuningjobrequesthyperparametersnepochs1.md deleted file mode 100755 index 643ab74..0000000 --- a/docs/models/shared/createfinetuningjobrequesthyperparametersnepochs1.md +++ /dev/null @@ -1,12 +0,0 @@ -# CreateFineTuningJobRequestHyperparametersNEpochs1 - -The number of epochs to train the model for. An epoch refers to one -full cycle through the training dataset. - - - -## Values - -| Name | Value | -| ------ | ------ | -| `Auto` | auto | \ No newline at end of file diff --git a/docs/models/shared/createfinetuningjobrequestmodel2.md b/docs/models/shared/createfinetuningjobrequestmodel2.md deleted file mode 100755 index 627ccfb..0000000 --- a/docs/models/shared/createfinetuningjobrequestmodel2.md +++ /dev/null @@ -1,14 +0,0 @@ -# CreateFineTuningJobRequestModel2 - -The name of the model to fine-tune. You can select one of the -[supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). 
- - - -## Values - -| Name | Value | -| ------------- | ------------- | -| `Babbage002` | babbage-002 | -| `Davinci002` | davinci-002 | -| `Gpt35Turbo` | gpt-3.5-turbo | \ No newline at end of file diff --git a/docs/models/shared/createmoderationrequest.md b/docs/models/shared/createmoderationrequest.md index 1b9a4be..14fa271 100755 --- a/docs/models/shared/createmoderationrequest.md +++ b/docs/models/shared/createmoderationrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `input` | *any* | :heavy_check_mark: | The input text to classify | -| `model` | *any* | :heavy_minus_sign: | Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.

The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
| \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input` | *any* | :heavy_check_mark: | The input text to classify | | +| `model` | *any* | :heavy_minus_sign: | Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.

The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
| text-moderation-stable | \ No newline at end of file diff --git a/docs/models/shared/createmoderationrequestmodel2.md b/docs/models/shared/createmoderationrequestmodel2.md deleted file mode 100755 index c5138bd..0000000 --- a/docs/models/shared/createmoderationrequestmodel2.md +++ /dev/null @@ -1,14 +0,0 @@ -# CreateModerationRequestModel2 - -Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. - -The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `TextModerationLatest` | text-moderation-latest | -| `TextModerationStable` | text-moderation-stable | \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequest.md b/docs/models/shared/createtranscriptionrequest.md deleted file mode 100755 index 8638432..0000000 --- a/docs/models/shared/createtranscriptionrequest.md +++ /dev/null @@ -1,13 +0,0 @@ -# CreateTranscriptionRequest - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `file` | [CreateTranscriptionRequestFile](../../models/shared/createtranscriptionrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
| -| `language` | *string* | :heavy_minus_sign: | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
| -| `model` | *any* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| -| `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
| -| `responseFormat` | [CreateTranscriptionRequestResponseFormat](../../models/shared/createtranscriptionrequestresponseformat.md) | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| -| `temperature` | *number* | :heavy_minus_sign: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
| \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequestfile.md b/docs/models/shared/createtranscriptionrequestfile.md deleted file mode 100755 index 76b878c..0000000 --- a/docs/models/shared/createtranscriptionrequestfile.md +++ /dev/null @@ -1,9 +0,0 @@ -# CreateTranscriptionRequestFile - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `content` | *Uint8Array* | :heavy_check_mark: | N/A | -| `file` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequestmodel2.md b/docs/models/shared/createtranscriptionrequestmodel2.md deleted file mode 100755 index 5057fc9..0000000 --- a/docs/models/shared/createtranscriptionrequestmodel2.md +++ /dev/null @@ -1,11 +0,0 @@ -# CreateTranscriptionRequestModel2 - -ID of the model to use. Only `whisper-1` is currently available. - - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `Whisper1` | whisper-1 | \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequestresponseformat.md b/docs/models/shared/createtranscriptionrequestresponseformat.md deleted file mode 100755 index 13488d6..0000000 --- a/docs/models/shared/createtranscriptionrequestresponseformat.md +++ /dev/null @@ -1,15 +0,0 @@ -# CreateTranscriptionRequestResponseFormat - -The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. 
- - - -## Values - -| Name | Value | -| ------------- | ------------- | -| `Json` | json | -| `Text` | text | -| `Srt` | srt | -| `VerboseJson` | verbose_json | -| `Vtt` | vtt | \ No newline at end of file diff --git a/docs/models/shared/createtranslationrequest.md b/docs/models/shared/createtranslationrequest.md deleted file mode 100755 index 68e4c52..0000000 --- a/docs/models/shared/createtranslationrequest.md +++ /dev/null @@ -1,12 +0,0 @@ -# CreateTranslationRequest - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `file` | 
[CreateTranslationRequestFile](../../models/shared/createtranslationrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
| -| `model` | *any* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| -| `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
| -| `responseFormat` | *string* | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| -| `temperature` | *number* | :heavy_minus_sign: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
| \ No newline at end of file diff --git a/docs/models/shared/createtranslationrequestfile.md b/docs/models/shared/createtranslationrequestfile.md deleted file mode 100755 index f143930..0000000 --- a/docs/models/shared/createtranslationrequestfile.md +++ /dev/null @@ -1,9 +0,0 @@ -# CreateTranslationRequestFile - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `content` | *Uint8Array* | :heavy_check_mark: | N/A | -| `file` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createtranslationrequestmodel2.md b/docs/models/shared/createtranslationrequestmodel2.md deleted file mode 100755 index 51f965c..0000000 --- a/docs/models/shared/createtranslationrequestmodel2.md +++ /dev/null @@ -1,11 +0,0 @@ -# CreateTranslationRequestModel2 - -ID of the model to use. Only `whisper-1` is currently available. - - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `Whisper1` | whisper-1 | \ No newline at end of file diff --git a/docs/models/shared/finetuningjob.md b/docs/models/shared/finetuningjob.md index c1b8500..82e900c 100755 --- a/docs/models/shared/finetuningjob.md +++ b/docs/models/shared/finetuningjob.md @@ -6,19 +6,19 @@ The `fine_tuning.job` object represents a fine-tuning job that has been created ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `createdAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was created. | -| `error` | [FineTuningJobError](../../models/shared/finetuningjoberror.md) | :heavy_check_mark: | For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. | -| `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. | -| `finishedAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. | -| `hyperparameters` | [FineTuningJobHyperparameters](../../models/shared/finetuningjobhyperparameters.md) | :heavy_check_mark: | The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. | -| `id` | *string* | :heavy_check_mark: | The object identifier, which can be referenced in the API endpoints. | -| `model` | *string* | :heavy_check_mark: | The base model that is being fine-tuned. | -| `object` | *string* | :heavy_check_mark: | The object type, which is always "fine_tuning.job". | -| `organizationId` | *string* | :heavy_check_mark: | The organization that owns the fine-tuning job. | -| `resultFiles` | *string*[] | :heavy_check_mark: | The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). | -| `status` | *string* | :heavy_check_mark: | The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. 
| -| `trainedTokens` | *number* | :heavy_check_mark: | The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. | -| `trainingFile` | *string* | :heavy_check_mark: | The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). | -| `validationFile` | *string* | :heavy_check_mark: | The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). | \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `createdAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was created. | | +| `error` | [FineTuningJobError](../../models/shared/finetuningjoberror.md) | :heavy_check_mark: | For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. | | +| `fineTunedModel` | *string* | :heavy_check_mark: | The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. 
| | +| `finishedAt` | *number* | :heavy_check_mark: | The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. | | +| `hyperparameters` | [FineTuningJobHyperparameters](../../models/shared/finetuningjobhyperparameters.md) | :heavy_check_mark: | The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. | | +| `id` | *string* | :heavy_check_mark: | The object identifier, which can be referenced in the API endpoints. | | +| `model` | *string* | :heavy_check_mark: | The base model that is being fine-tuned. | | +| `object` | *string* | :heavy_check_mark: | The object type, which is always "fine_tuning.job". | | +| `organizationId` | *string* | :heavy_check_mark: | The organization that owns the fine-tuning job. | | +| `resultFiles` | *string*[] | :heavy_check_mark: | The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). | file-abc123 | +| `status` | *string* | :heavy_check_mark: | The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. | | +| `trainedTokens` | *number* | :heavy_check_mark: | The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. | | +| `trainingFile` | *string* | :heavy_check_mark: | The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). | | +| `validationFile` | *string* | :heavy_check_mark: | The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). 
| | \ No newline at end of file diff --git a/docs/models/shared/finetuningjobhyperparametersnepochs1.md b/docs/models/shared/finetuningjobhyperparametersnepochs1.md deleted file mode 100755 index 8774e43..0000000 --- a/docs/models/shared/finetuningjobhyperparametersnepochs1.md +++ /dev/null @@ -1,11 +0,0 @@ -# FineTuningJobHyperparametersNEpochs1 - -The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. -"auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. - - -## Values - -| Name | Value | -| ------ | ------ | -| `Auto` | auto | \ No newline at end of file diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md index 3901455..feb8479 100755 --- a/docs/sdks/openai/README.md +++ b/docs/sdks/openai/README.md @@ -70,21 +70,22 @@ Immediately cancel a fine-tune job. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CancelFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.cancelFineTune({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); -sdk.openAI.cancelFineTune({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", -}).then((res: CancelFineTuneResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -109,21 +110,22 @@ Immediately cancel a fine-tune job. 
```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CancelFineTuningJobResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.cancelFineTuningJob({ + fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); -sdk.openAI.cancelFineTuningJob({ - fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", -}).then((res: CancelFineTuningJobResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -147,63 +149,50 @@ Creates a model response for the given chat conversation. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateChatCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { - ChatCompletionRequestMessageRole, - CreateChatCompletionRequestFunctionCall1, - CreateChatCompletionRequestModel2, -} from "@speakeasy-api/openai/dist/sdk/models/shared"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createChatCompletion({ - frequencyPenalty: 7707.26, - functionCall: { - name: "Diesel Money", - }, - functions: [ - { - description: "Progressive radical model", - name: "Account International incidunt", - parameters: { - "eum": "Meadows", - }, +import { ChatCompletionRequestMessageRole } from "@speakeasy-api/openai/dist/sdk/models/shared"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", }, - ], - logitBias: { - "eos": 206153, - }, - maxTokens: 29019, - messages: [ - { - content: "Northeast frictionless Park", - functionCall: { - arguments: "Future Southeast", - name: "Southeast", + }); + + const res = await sdk.openAI.createChatCompletion({ + functionCall: "Hybrid", + functions: [ + { + name: "Hoboken reinvent Web", + parameters: { + "Southeast": "International", + }, }, - name: "Faso", - role: ChatCompletionRequestMessageRole.User, 
+ ], + logitBias: { + "incidunt": 432116, }, - ], - model: "gpt-3.5-turbo", - n: 1, - presencePenalty: 9408.67, - stop: [ - "tangible", - ], - stream: false, - temperature: 1, - topP: 1, - user: "user-1234", -}).then((res: CreateChatCompletionResponse) => { + messages: [ + { + content: "abbreviate", + functionCall: { + arguments: "Directives Chair", + name: "Northeast frictionless Park", + }, + role: ChatCompletionRequestMessageRole.Assistant, + }, + ], + model: "gpt-3.5-turbo", + n: 1, + stop: "Future", + temperature: 1, + topP: 1, + user: "user-1234", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -227,41 +216,33 @@ Creates a completion for the provided prompt and parameters. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateCompletionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateCompletionRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createCompletion({ - bestOf: 160667, - echo: false, - frequencyPenalty: 141.61, - logitBias: { - "velit": 254881, - }, - logprobs: 877910, - maxTokens: 16, - model: CreateCompletionRequestModel2.TextCurie001, - n: 1, - presencePenalty: 7232.16, - prompt: "This is a test.", - stop: [ - "[\"\n\"]", - ], - stream: false, - suffix: "test.", - temperature: 1, - topP: 1, - user: "user-1234", -}).then((res: CreateCompletionResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createCompletion({ + logitBias: { + "red": 242695, + }, + maxTokens: 16, + model: "Fresh", + n: 1, + prompt: "Reggae", + stop: "Fluorine", + suffix: "test.", + temperature: 1, + topP: 1, + user: "user-1234", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -287,27 +268,27 @@ Creates a new edit for the provided input, instruction, and 
parameters. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateEditRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createEdit({ - input: "What day of the wek is it?", - instruction: "Fix the spelling mistakes.", - model: "text-davinci-edit-001", - n: 1, - temperature: 1, - topP: 1, -}).then((res: CreateEditResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createEdit({ + input: "What day of the wek is it?", + instruction: "Fix the spelling mistakes.", + model: "text-davinci-edit-001", + n: 1, + temperature: 1, + topP: 1, + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -331,36 +312,30 @@ Creates an embedding vector representing the input text. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateEmbeddingResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateEmbeddingRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createEmbedding({ - input: [ - [ - 115613, - ], - ], - model: CreateEmbeddingRequestModel2.TextEmbeddingAda002, - user: "user-1234", -}).then((res: CreateEmbeddingResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createEmbedding({ + "chief": "compressing", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `request` | [shared.CreateEmbeddingRequest](../../models/shared/createembeddingrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | +| Parameter | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `request` | [Record](../../models//.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | ### Response @@ -377,33 +352,30 @@ Upload a file that can be used across various endpoints/features. 
Currently, the ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createFile({ - file: { - content: "`'$Z`(L/RH" as bytes <<<>>>, - file: "Rap National", - }, - purpose: "Female synergistic Maine", -}).then((res: CreateFileResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createFile({ + "Associate": "Miami", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `request` | [shared.CreateFileRequest](../../models/shared/createfilerequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | +| Parameter | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `request` | [Record](../../models//.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| ### Response @@ -426,35 +398,30 @@ Response includes details of the enqueued job including job status and the name ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateFineTuneRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createFineTune({ - batchSize: 763928, - classificationBetas: [ - 3993.02, - ], - classificationNClasses: 172686, - classificationPositiveClass: "male Buckinghamshire", - computeClassificationMetrics: false, - learningRateMultiplier: 4447.26, - model: CreateFineTuneRequestModel2.Curie, - nEpochs: 441380, - promptLossWeight: 37.22, - suffix: "Reggae Gorgeous synthesizing", - trainingFile: "file-abc123", - validationFile: "file-abc123", -}).then((res: CreateFineTuneResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createFineTune({ + classificationBetas: [ + 0.6, + 1, + 1.5, + 2, + ], + model: "curie", + trainingFile: "file-abc123", + validationFile: "file-abc123", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -483,31 +450,27 @@ Response includes details of the enqueued job including job status and the name ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateFineTuningJobResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { - CreateFineTuningJobRequestHyperparametersNEpochs1, - CreateFineTuningJobRequestModel2, -} from "@speakeasy-api/openai/dist/sdk/models/shared"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createFineTuningJob({ - hyperparameters: { - nEpochs: CreateFineTuningJobRequestHyperparametersNEpochs1.Auto, - }, - model: "gpt-3.5-turbo", - suffix: "Thallium", - trainingFile: "file-abc123", - validationFile: 
"file-abc123", -}).then((res: CreateFineTuningJobResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createFineTuningJob({ + hyperparameters: { + nEpochs: "empower", + }, + model: "gpt-3.5-turbo", + trainingFile: "file-abc123", + validationFile: "file-abc123", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -531,26 +494,27 @@ Creates an image given a prompt. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateImageResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateImageRequestResponseFormat, CreateImageRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createImage({ - n: 1, - prompt: "A cute baby sea otter", - responseFormat: CreateImageRequestResponseFormat.Url, - size: CreateImageRequestSize.OneThousandAndTwentyFourx1024, - user: "user-1234", -}).then((res: CreateImageResponse) => { +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createImage({ + n: 1, + prompt: "A cute baby sea otter", + responseFormat: CreateImageRequestResponseFormat.Url, + size: CreateImageRequestSize.OneThousandAndTwentyFourx1024, + user: "user-1234", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -574,34 +538,35 @@ Creates an edited or extended image given an original image and a prompt. 
```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateImageEditResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateImageEditRequestResponseFormat, CreateImageEditRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createImageEdit({ - image: { - content: "0]/(|3W_T9" as bytes <<<>>>, - image: "https://loremflickr.com/640/480", - }, - mask: { - content: "`^YjrpxopK" as bytes <<<>>>, - mask: "Rap Dodge Incredible", - }, - n: 1, - prompt: "A cute baby sea otter wearing a beret", - responseFormat: CreateImageEditRequestResponseFormat.Url, - size: CreateImageEditRequestSize.OneThousandAndTwentyFourx1024, - user: "user-1234", -}).then((res: CreateImageEditResponse) => { +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createImageEdit({ + image: { + content: "0]/(|3W_T9" as bytes <<<>>>, + image: "https://loremflickr.com/640/480", + }, + mask: { + content: "`^YjrpxopK" as bytes <<<>>>, + mask: "Rap Dodge Incredible", + }, + n: 1, + prompt: "A cute baby sea otter wearing a beret", + responseFormat: CreateImageEditRequestResponseFormat.Url, + size: CreateImageEditRequestSize.OneThousandAndTwentyFourx1024, + user: "user-1234", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -625,29 +590,30 @@ Creates a variation of a given image. 
```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateImageVariationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; import { CreateImageVariationRequestResponseFormat, CreateImageVariationRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createImageVariation({ - image: { - content: "`YY7PCrWuK" as bytes <<<>>>, - image: "https://loremflickr.com/640/480", - }, - n: 1, - responseFormat: CreateImageVariationRequestResponseFormat.Url, - size: CreateImageVariationRequestSize.OneThousandAndTwentyFourx1024, - user: "user-1234", -}).then((res: CreateImageVariationResponse) => { +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createImageVariation({ + image: { + content: "`YY7PCrWuK" as bytes <<<>>>, + image: "https://loremflickr.com/640/480", + }, + n: 1, + responseFormat: CreateImageVariationRequestResponseFormat.Url, + size: CreateImageVariationRequestSize.OneThousandAndTwentyFourx1024, + user: "user-1234", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -671,25 +637,23 @@ Classifies if text violates OpenAI's Content Policy ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateModerationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateModerationRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createModeration({ - input: [ - "I want to kill them.", - ], - model: CreateModerationRequestModel2.TextModerationStable, -}).then((res: CreateModerationResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createModeration({ + input: "stable", + model: "text-moderation-stable", + }); + if (res.statusCode == 
200) { // handle response } -}); +})(); ``` ### Parameters @@ -713,38 +677,30 @@ Transcribes audio into the input language. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateTranscriptionResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateTranscriptionRequestModel2, CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createTranscription({ - file: { - content: "\#BbTW'zX9" as bytes <<<>>>, - file: "Buckinghamshire", - }, - language: "teal Titanium", - model: "whisper-1", - prompt: "Mendelevium Kansas behind", - responseFormat: CreateTranscriptionRequestResponseFormat.Json, - temperature: 3694.44, -}).then((res: CreateTranscriptionResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createTranscription({ + "Lead": "neutral", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `request` | [shared.CreateTranscriptionRequest](../../models/shared/createtranscriptionrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| +| Parameter | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `request` | [Record](../../models//.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | ### Response @@ -760,37 +716,30 @@ Translates audio into English. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { CreateTranslationResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -import { CreateTranslationRequestModel2 } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.createTranslation({ - file: { - content: "M57UL;W3rx" as bytes <<<>>>, - file: "Reggae Toys silver", - }, - model: CreateTranslationRequestModel2.Whisper1, - prompt: "Soft East Frozen", - responseFormat: "Analyst aboard relocate", - temperature: 6003.73, -}).then((res: CreateTranslationResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.createTranslation({ + "DRAM": "Granite", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `request` | [shared.CreateTranslationRequest](../../models/shared/createtranslationrequest.md) | 
:heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | +| Parameter | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `request` | [Record](../../models//.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | ### Response @@ -806,21 +755,22 @@ Delete a file. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { DeleteFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.deleteFile({ + fileId: "yellow kiddingly white", + }); -sdk.openAI.deleteFile({ - fileId: "yellow kiddingly white", -}).then((res: DeleteFileResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -844,21 +794,22 @@ Delete a fine-tuned model. 
You must have the Owner role in your organization to ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { DeleteModelResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.deleteModel({ + model: "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + }); -sdk.openAI.deleteModel({ - model: "ft:gpt-3.5-turbo:acemeco:suffix:abc123", -}).then((res: DeleteModelResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -882,21 +833,22 @@ Returns the contents of the specified file. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { DownloadFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.downloadFile({ + fileId: "Maserati Bronze Audi", + }); -sdk.openAI.downloadFile({ - fileId: "Maserati Bronze Audi", -}).then((res: DownloadFileResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -920,19 +872,20 @@ Returns a list of files that belong to the user's organization. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { ListFilesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.listFiles(); -sdk.openAI.listFiles().then((res: ListFilesResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -958,22 +911,22 @@ Get fine-grained status updates for a fine-tune job. 
```typescript import { Gpt } from "@speakeasy-api/openai"; -import { ListFineTuneEventsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.listFineTuneEvents({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - stream: false, -}).then((res: ListFineTuneEventsResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.listFineTuneEvents({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -1000,19 +953,20 @@ List your organization's fine-tuning jobs ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { ListFineTunesResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.listFineTunes(); -sdk.openAI.listFineTunes().then((res: ListFineTunesResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -1036,23 +990,22 @@ Get status updates for a fine-tuning job. 
```typescript import { Gpt } from "@speakeasy-api/openai"; -import { ListFineTuningEventsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.listFineTuningEvents({ - after: "phew silver Consultant", - fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - limit: 104325, -}).then((res: ListFineTuningEventsResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.listFineTuningEvents({ + fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -1076,19 +1029,20 @@ Lists the currently available models, and provides basic information about each ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { ListModelsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.listModels(); -sdk.openAI.listModels().then((res: ListModelsResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -1112,22 +1066,20 @@ List your organization's fine-tuning jobs ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { ListPaginatedFineTuningJobsResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; - -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); - -sdk.openAI.listPaginatedFineTuningJobs({ - after: "GB voluptate", - limit: 374490, -}).then((res: ListPaginatedFineTuningJobsResponse) => { + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.listPaginatedFineTuningJobs({}); + if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -1151,21 +1103,22 @@ Returns information 
about a specific file. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { RetrieveFileResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.retrieveFile({ + fileId: "online Facilitator enfold", + }); -sdk.openAI.retrieveFile({ - fileId: "online Facilitator enfold", -}).then((res: RetrieveFileResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -1194,21 +1147,22 @@ Gets info about the fine-tune job. ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { RetrieveFineTuneResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.retrieveFineTune({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); -sdk.openAI.retrieveFineTune({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", -}).then((res: RetrieveFineTuneResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -1235,21 +1189,22 @@ Get info about a fine-tuning job. 
```typescript import { Gpt } from "@speakeasy-api/openai"; -import { RetrieveFineTuningJobResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.retrieveFineTuningJob({ + fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); -sdk.openAI.retrieveFineTuningJob({ - fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", -}).then((res: RetrieveFineTuningJobResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters @@ -1273,21 +1228,22 @@ Retrieves a model instance, providing basic information about the model such as ```typescript import { Gpt } from "@speakeasy-api/openai"; -import { RetrieveModelResponse } from "@speakeasy-api/openai/dist/sdk/models/operations"; -const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, -}); +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.openAI.retrieveModel({ + model: "gpt-3.5-turbo", + }); -sdk.openAI.retrieveModel({ - model: "gpt-3.5-turbo", -}).then((res: RetrieveModelResponse) => { if (res.statusCode == 200) { // handle response } -}); +})(); ``` ### Parameters diff --git a/files.gen b/files.gen index fe58da2..51f43aa 100755 --- a/files.gen +++ b/files.gen @@ -58,15 +58,12 @@ src/sdk/models/shared/chatcompletionresponsemessage.ts src/sdk/models/shared/createchatcompletionrequest.ts src/sdk/models/shared/chatcompletionrequestmessage.ts src/sdk/models/shared/chatcompletionfunctions.ts -src/sdk/models/shared/chatcompletionfunctioncalloption.ts src/sdk/models/shared/createcompletionresponse.ts src/sdk/models/shared/createcompletionrequest.ts src/sdk/models/shared/createeditresponse.ts src/sdk/models/shared/createeditrequest.ts src/sdk/models/shared/createembeddingresponse.ts src/sdk/models/shared/embedding.ts 
-src/sdk/models/shared/createembeddingrequest.ts -src/sdk/models/shared/createfilerequest.ts src/sdk/models/shared/createfinetunerequest.ts src/sdk/models/shared/createfinetuningjobrequest.ts src/sdk/models/shared/imagesresponse.ts @@ -77,9 +74,7 @@ src/sdk/models/shared/createimagevariationrequest.ts src/sdk/models/shared/createmoderationresponse.ts src/sdk/models/shared/createmoderationrequest.ts src/sdk/models/shared/createtranscriptionresponse.ts -src/sdk/models/shared/createtranscriptionrequest.ts src/sdk/models/shared/createtranslationresponse.ts -src/sdk/models/shared/createtranslationrequest.ts src/sdk/models/shared/deletefileresponse.ts src/sdk/models/shared/deletemodelresponse.ts src/sdk/models/shared/listfilesresponse.ts @@ -139,7 +134,6 @@ docs/models/shared/finetune.md docs/models/shared/openaifile.md docs/models/shared/finetuneevent.md docs/models/shared/finetuningjoberror.md -docs/models/shared/finetuningjobhyperparametersnepochs1.md docs/models/shared/finetuningjobhyperparameters.md docs/models/shared/finetuningjob.md docs/models/shared/createchatcompletionresponsechoicesfinishreason.md @@ -149,37 +143,25 @@ docs/models/shared/completionusage.md docs/models/shared/chatcompletionresponsemessagefunctioncall.md docs/models/shared/chatcompletionresponsemessagerole.md docs/models/shared/chatcompletionresponsemessage.md -docs/models/shared/createchatcompletionrequestfunctioncall1.md -docs/models/shared/createchatcompletionrequestmodel2.md docs/models/shared/createchatcompletionrequest.md docs/models/shared/chatcompletionrequestmessagefunctioncall.md docs/models/shared/chatcompletionrequestmessagerole.md docs/models/shared/chatcompletionrequestmessage.md docs/models/shared/chatcompletionfunctions.md -docs/models/shared/chatcompletionfunctioncalloption.md docs/models/shared/createcompletionresponsechoicesfinishreason.md docs/models/shared/createcompletionresponsechoiceslogprobs.md docs/models/shared/createcompletionresponsechoices.md 
docs/models/shared/createcompletionresponse.md -docs/models/shared/createcompletionrequestmodel2.md docs/models/shared/createcompletionrequest.md docs/models/shared/createeditresponsechoicesfinishreason.md docs/models/shared/createeditresponsechoices.md docs/models/shared/createeditresponse.md -docs/models/shared/createeditrequestmodel2.md docs/models/shared/createeditrequest.md docs/models/shared/createembeddingresponseusage.md docs/models/shared/createembeddingresponse.md docs/models/shared/embedding.md -docs/models/shared/createembeddingrequestmodel2.md -docs/models/shared/createembeddingrequest.md -docs/models/shared/createfilerequestfile.md -docs/models/shared/createfilerequest.md -docs/models/shared/createfinetunerequestmodel2.md docs/models/shared/createfinetunerequest.md -docs/models/shared/createfinetuningjobrequesthyperparametersnepochs1.md docs/models/shared/createfinetuningjobrequesthyperparameters.md -docs/models/shared/createfinetuningjobrequestmodel2.md docs/models/shared/createfinetuningjobrequest.md docs/models/shared/imagesresponse.md docs/models/shared/image.md @@ -199,17 +181,9 @@ docs/models/shared/createmoderationresponseresultscategories.md docs/models/shared/createmoderationresponseresultscategoryscores.md docs/models/shared/createmoderationresponseresults.md docs/models/shared/createmoderationresponse.md -docs/models/shared/createmoderationrequestmodel2.md docs/models/shared/createmoderationrequest.md docs/models/shared/createtranscriptionresponse.md -docs/models/shared/createtranscriptionrequestfile.md -docs/models/shared/createtranscriptionrequestmodel2.md -docs/models/shared/createtranscriptionrequestresponseformat.md -docs/models/shared/createtranscriptionrequest.md docs/models/shared/createtranslationresponse.md -docs/models/shared/createtranslationrequestfile.md -docs/models/shared/createtranslationrequestmodel2.md -docs/models/shared/createtranslationrequest.md docs/models/shared/deletefileresponse.md 
docs/models/shared/deletemodelresponse.md docs/models/shared/listfilesresponse.md diff --git a/gen.yaml b/gen.yaml index 52089d5..96508f8 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: 11e459ef36cd22c19855de8f048393af docVersion: 2.0.0 - speakeasyVersion: 1.94.0 - generationVersion: 2.147.0 + speakeasyVersion: 1.96.1 + generationVersion: 2.150.0 generation: sdkClassName: gpt sdkFlattening: true @@ -11,12 +11,13 @@ generation: telemetryEnabled: false features: typescript: - core: 2.90.3 + core: 2.90.4 deprecations: 2.81.1 - globalSecurity: 2.81.1 + globalSecurity: 2.82.0 globalServerURLs: 2.82.0 typescript: - version: 2.25.3 + version: 2.26.0 author: speakeasy-openai + flattenGlobalSecurity: false maxMethodParams: 0 packageName: '@speakeasy-api/openai' diff --git a/jest.config.js b/jest.config.js index a80354c..e3f8611 100755 --- a/jest.config.js +++ b/jest.config.js @@ -1,5 +1,5 @@ module.exports = { preset: "ts-jest", testEnvironment: "node", - testPathIgnorePatterns: ["/__tests__/helpers.ts"], + testPathIgnorePatterns: ["/__tests__/helpers.ts", "/__tests__/common_helpers.ts"], }; diff --git a/package-lock.json b/package-lock.json index 276d8a0..b4a96b4 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.25.3", + "version": "2.26.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.25.3", + "version": "2.26.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index bf345f4..7fb2eb8 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.25.3", + "version": "2.26.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build" diff --git a/src/sdk/models/shared/chatcompletionfunctioncalloption.ts b/src/sdk/models/shared/chatcompletionfunctioncalloption.ts deleted 
file mode 100755 index 29ef817..0000000 --- a/src/sdk/models/shared/chatcompletionfunctioncalloption.ts +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; - -/** - * Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. - * - * @remarks - * - */ -export class ChatCompletionFunctionCallOption extends SpeakeasyBase { - /** - * The name of the function to call. - */ - @SpeakeasyMetadata() - @Expose({ name: "name" }) - name: string; -} diff --git a/src/sdk/models/shared/createchatcompletionrequest.ts b/src/sdk/models/shared/createchatcompletionrequest.ts index 58f3713..5cb0f87 100755 --- a/src/sdk/models/shared/createchatcompletionrequest.ts +++ b/src/sdk/models/shared/createchatcompletionrequest.ts @@ -7,34 +7,6 @@ import { ChatCompletionFunctions } from "./chatcompletionfunctions"; import { ChatCompletionRequestMessage } from "./chatcompletionrequestmessage"; import { Expose, Type } from "class-transformer"; -/** - * Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. 
- * - * @remarks - * - */ -export enum CreateChatCompletionRequestFunctionCall1 { - None = "none", - Auto = "auto", -} - -/** - * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. - */ -export enum CreateChatCompletionRequestModel2 { - Gpt4 = "gpt-4", - Gpt40314 = "gpt-4-0314", - Gpt40613 = "gpt-4-0613", - Gpt432k = "gpt-4-32k", - Gpt432k0314 = "gpt-4-32k-0314", - Gpt432k0613 = "gpt-4-32k-0613", - Gpt35Turbo = "gpt-3.5-turbo", - Gpt35Turbo16k = "gpt-3.5-turbo-16k", - Gpt35Turbo0301 = "gpt-3.5-turbo-0301", - Gpt35Turbo0613 = "gpt-3.5-turbo-0613", - Gpt35Turbo16k0613 = "gpt-3.5-turbo-16k-0613", -} - export class CreateChatCompletionRequest extends SpeakeasyBase { /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. diff --git a/src/sdk/models/shared/createcompletionrequest.ts b/src/sdk/models/shared/createcompletionrequest.ts index e7c54e8..5e5c56a 100755 --- a/src/sdk/models/shared/createcompletionrequest.ts +++ b/src/sdk/models/shared/createcompletionrequest.ts @@ -5,25 +5,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; -/** - * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. 
- * - * @remarks - * - */ -export enum CreateCompletionRequestModel2 { - Babbage002 = "babbage-002", - Davinci002 = "davinci-002", - Gpt35TurboInstruct = "gpt-3.5-turbo-instruct", - TextDavinci003 = "text-davinci-003", - TextDavinci002 = "text-davinci-002", - TextDavinci001 = "text-davinci-001", - CodeDavinci002 = "code-davinci-002", - TextCurie001 = "text-curie-001", - TextBabbage001 = "text-babbage-001", - TextAda001 = "text-ada-001", -} - export class CreateCompletionRequest extends SpeakeasyBase { /** * Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. diff --git a/src/sdk/models/shared/createeditrequest.ts b/src/sdk/models/shared/createeditrequest.ts index a5933fb..f219940 100755 --- a/src/sdk/models/shared/createeditrequest.ts +++ b/src/sdk/models/shared/createeditrequest.ts @@ -5,14 +5,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; -/** - * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. - */ -export enum CreateEditRequestModel2 { - TextDavinciEdit001 = "text-davinci-edit-001", - CodeDavinciEdit001 = "code-davinci-edit-001", -} - export class CreateEditRequest extends SpeakeasyBase { /** * The input text to use as a starting point for the edit. diff --git a/src/sdk/models/shared/createembeddingrequest.ts b/src/sdk/models/shared/createembeddingrequest.ts deleted file mode 100755 index 11dcdf9..0000000 --- a/src/sdk/models/shared/createembeddingrequest.ts +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; - -/** - * ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - * - * @remarks - * - */ -export enum CreateEmbeddingRequestModel2 { - TextEmbeddingAda002 = "text-embedding-ada-002", -} - -export class CreateEmbeddingRequest extends SpeakeasyBase { - /** - * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. - * - * @remarks - * - */ - @SpeakeasyMetadata() - @Expose({ name: "input" }) - input: any; - - /** - * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - * - * @remarks - * - */ - @SpeakeasyMetadata() - @Expose({ name: "model" }) - model: any; - - /** - * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). - * - * @remarks - * - */ - @SpeakeasyMetadata() - @Expose({ name: "user" }) - user?: string; -} diff --git a/src/sdk/models/shared/createfilerequest.ts b/src/sdk/models/shared/createfilerequest.ts deleted file mode 100755 index eb2eec1..0000000 --- a/src/sdk/models/shared/createfilerequest.ts +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
- */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; - -export class CreateFileRequestFile extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "multipart_form, content=true" }) - content: Uint8Array; - - @SpeakeasyMetadata({ data: "multipart_form, name=file" }) - file: string; -} - -export class CreateFileRequest extends SpeakeasyBase { - /** - * The file object (not file name) to be uploaded. - * - * @remarks - * - * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. - * - */ - @SpeakeasyMetadata({ data: "multipart_form, file=true" }) - file: CreateFileRequestFile; - - /** - * The intended purpose of the uploaded file. - * - * @remarks - * - * Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file is correct for fine-tuning. - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=purpose" }) - purpose: string; -} diff --git a/src/sdk/models/shared/createfinetunerequest.ts b/src/sdk/models/shared/createfinetunerequest.ts index 0ede59f..8a96f33 100755 --- a/src/sdk/models/shared/createfinetunerequest.ts +++ b/src/sdk/models/shared/createfinetunerequest.ts @@ -5,22 +5,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; -/** - * The name of the base model to fine-tune. You can select one of "ada", - * - * @remarks - * "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. - * To learn more about these models, see the - * [Models](/docs/models) documentation. - * - */ -export enum CreateFineTuneRequestModel2 { - Ada = "ada", - Babbage = "babbage", - Curie = "curie", - Davinci = "davinci", -} - export class CreateFineTuneRequest extends SpeakeasyBase { /** * The batch size to use for training. 
The batch size is the number of diff --git a/src/sdk/models/shared/createfinetuningjobrequest.ts b/src/sdk/models/shared/createfinetuningjobrequest.ts index 7e49f8b..c017396 100755 --- a/src/sdk/models/shared/createfinetuningjobrequest.ts +++ b/src/sdk/models/shared/createfinetuningjobrequest.ts @@ -5,17 +5,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose, Type } from "class-transformer"; -/** - * The number of epochs to train the model for. An epoch refers to one - * - * @remarks - * full cycle through the training dataset. - * - */ -export enum CreateFineTuningJobRequestHyperparametersNEpochs1 { - Auto = "auto", -} - /** * The hyperparameters used for the fine-tuning job. */ @@ -32,19 +21,6 @@ export class CreateFineTuningJobRequestHyperparameters extends SpeakeasyBase { nEpochs?: any; } -/** - * The name of the model to fine-tune. You can select one of the - * - * @remarks - * [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). - * - */ -export enum CreateFineTuningJobRequestModel2 { - Babbage002 = "babbage-002", - Davinci002 = "davinci-002", - Gpt35Turbo = "gpt-3.5-turbo", -} - export class CreateFineTuningJobRequest extends SpeakeasyBase { /** * The hyperparameters used for the fine-tuning job. diff --git a/src/sdk/models/shared/createmoderationrequest.ts b/src/sdk/models/shared/createmoderationrequest.ts index a1db94a..e196f17 100755 --- a/src/sdk/models/shared/createmoderationrequest.ts +++ b/src/sdk/models/shared/createmoderationrequest.ts @@ -5,19 +5,6 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; -/** - * Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. - * - * @remarks - * - * The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. 
If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. - * - */ -export enum CreateModerationRequestModel2 { - TextModerationLatest = "text-moderation-latest", - TextModerationStable = "text-moderation-stable", -} - export class CreateModerationRequest extends SpeakeasyBase { /** * The input text to classify diff --git a/src/sdk/models/shared/createtranscriptionrequest.ts b/src/sdk/models/shared/createtranscriptionrequest.ts deleted file mode 100755 index 61ba8da..0000000 --- a/src/sdk/models/shared/createtranscriptionrequest.ts +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. - */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; - -export class CreateTranscriptionRequestFile extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "multipart_form, content=true" }) - content: Uint8Array; - - @SpeakeasyMetadata({ data: "multipart_form, name=file" }) - file: string; -} - -/** - * ID of the model to use. Only `whisper-1` is currently available. - * - * @remarks - * - */ -export enum CreateTranscriptionRequestModel2 { - Whisper1 = "whisper-1", -} - -/** - * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. - * - * @remarks - * - */ -export enum CreateTranscriptionRequestResponseFormat { - Json = "json", - Text = "text", - Srt = "srt", - VerboseJson = "verbose_json", - Vtt = "vtt", -} - -export class CreateTranscriptionRequest extends SpeakeasyBase { - /** - * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, file=true" }) - file: CreateTranscriptionRequestFile; - - /** - * The language of the input audio. 
Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=language" }) - language?: string; - - /** - * ID of the model to use. Only `whisper-1` is currently available. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=model;json=true" }) - model: any; - - /** - * An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=prompt" }) - prompt?: string; - - /** - * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) - responseFormat?: CreateTranscriptionRequestResponseFormat; - - /** - * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=temperature" }) - temperature?: number; -} diff --git a/src/sdk/models/shared/createtranslationrequest.ts b/src/sdk/models/shared/createtranslationrequest.ts deleted file mode 100755 index f2efcb8..0000000 --- a/src/sdk/models/shared/createtranslationrequest.ts +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
- */ - -import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; - -export class CreateTranslationRequestFile extends SpeakeasyBase { - @SpeakeasyMetadata({ data: "multipart_form, content=true" }) - content: Uint8Array; - - @SpeakeasyMetadata({ data: "multipart_form, name=file" }) - file: string; -} - -/** - * ID of the model to use. Only `whisper-1` is currently available. - * - * @remarks - * - */ -export enum CreateTranslationRequestModel2 { - Whisper1 = "whisper-1", -} - -export class CreateTranslationRequest extends SpeakeasyBase { - /** - * The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, file=true" }) - file: CreateTranslationRequestFile; - - /** - * ID of the model to use. Only `whisper-1` is currently available. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=model;json=true" }) - model: any; - - /** - * An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=prompt" }) - prompt?: string; - - /** - * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. - * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) - responseFormat?: string; - - /** - * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. 
- * - * @remarks - * - */ - @SpeakeasyMetadata({ data: "multipart_form, name=temperature" }) - temperature?: number; -} diff --git a/src/sdk/models/shared/finetuningjob.ts b/src/sdk/models/shared/finetuningjob.ts index 4c15cb0..b7fb159 100755 --- a/src/sdk/models/shared/finetuningjob.ts +++ b/src/sdk/models/shared/finetuningjob.ts @@ -31,16 +31,6 @@ export class FineTuningJobError extends SpeakeasyBase { param: string; } -/** - * The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. - * - * @remarks - * "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. - */ -export enum FineTuningJobHyperparametersNEpochs1 { - Auto = "auto", -} - /** * The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. */ diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index e0ef1fd..5f70c40 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -2,7 +2,6 @@ * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
*/ -export * from "./chatcompletionfunctioncalloption"; export * from "./chatcompletionfunctions"; export * from "./chatcompletionrequestmessage"; export * from "./chatcompletionresponsemessage"; @@ -13,9 +12,7 @@ export * from "./createcompletionrequest"; export * from "./createcompletionresponse"; export * from "./createeditrequest"; export * from "./createeditresponse"; -export * from "./createembeddingrequest"; export * from "./createembeddingresponse"; -export * from "./createfilerequest"; export * from "./createfinetunerequest"; export * from "./createfinetuningjobrequest"; export * from "./createimageeditrequest"; @@ -23,9 +20,7 @@ export * from "./createimagerequest"; export * from "./createimagevariationrequest"; export * from "./createmoderationrequest"; export * from "./createmoderationresponse"; -export * from "./createtranscriptionrequest"; export * from "./createtranscriptionresponse"; -export * from "./createtranslationrequest"; export * from "./createtranslationresponse"; export * from "./deletefileresponse"; export * from "./deletemodelresponse"; diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts index 308944d..452808f 100755 --- a/src/sdk/openai.ts +++ b/src/sdk/openai.ts @@ -441,13 +441,9 @@ export class OpenAI { * Creates an embedding vector representing the input text. 
*/ async createEmbedding( - req: shared.CreateEmbeddingRequest, + req: Record, config?: AxiosRequestConfig ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateEmbeddingRequest(req); - } - const baseURL: string = utils.templateUrl( this.sdkConfiguration.serverURL, this.sdkConfiguration.serverDefaults @@ -530,13 +526,9 @@ export class OpenAI { * */ async createFile( - req: shared.CreateFileRequest, + req: Record, config?: AxiosRequestConfig ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateFileRequest(req); - } - const baseURL: string = utils.templateUrl( this.sdkConfiguration.serverURL, this.sdkConfiguration.serverDefaults @@ -1154,13 +1146,9 @@ export class OpenAI { * Transcribes audio into the input language. */ async createTranscription( - req: shared.CreateTranscriptionRequest, + req: Record, config?: AxiosRequestConfig ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateTranscriptionRequest(req); - } - const baseURL: string = utils.templateUrl( this.sdkConfiguration.serverURL, this.sdkConfiguration.serverDefaults @@ -1243,13 +1231,9 @@ export class OpenAI { * Translates audio into English. 
*/ async createTranslation( - req: shared.CreateTranslationRequest, + req: Record, config?: AxiosRequestConfig ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateTranslationRequest(req); - } - const baseURL: string = utils.templateUrl( this.sdkConfiguration.serverURL, this.sdkConfiguration.serverDefaults diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 5cf6fbd..b22f7fc 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -21,6 +21,7 @@ export type SDKProps = { * The security details required to authenticate the SDK */ security?: shared.Security | (() => Promise); + /** * Allows overriding the default axios client used by the SDK */ @@ -48,9 +49,9 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.25.3"; - genVersion = "2.147.0"; - userAgent = "speakeasy-sdk/typescript 2.25.3 2.147.0 2.0.0 @speakeasy-api/openai"; + sdkVersion = "2.26.0"; + genVersion = "2.150.0"; + userAgent = "speakeasy-sdk/typescript 2.26.0 2.150.0 2.0.0 @speakeasy-api/openai"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From 510d767fadd91e498f287072de0c81db93743d50 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 11 Oct 2023 00:55:15 +0000 Subject: [PATCH 63/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.97.1 --- README.md | 109 +- RELEASES.md | 12 +- USAGE.md | 9 +- docs/models/shared/createembeddingrequest.md | 10 + docs/models/shared/createfilerequest.md | 9 + docs/models/shared/createfilerequestfile.md | 9 + docs/models/shared/createfinetunerequest.md | 2 +- .../createfinetunerequesthyperparameters.md | 10 + .../shared/createtranscriptionrequest.md | 13 + .../shared/createtranscriptionrequestfile.md | 9 + ...reatetranscriptionrequestresponseformat.md | 15 + .../models/shared/createtranslationrequest.md | 12 + .../shared/createtranslationrequestfile.md | 9 + docs/sdks/audio/README.md | 98 + 
docs/sdks/chat/README.md | 77 + docs/sdks/completions/README.md | 60 + docs/sdks/edits/README.md | 56 + docs/sdks/embeddings/README.md | 51 + docs/sdks/files/README.md | 212 ++ docs/sdks/finetunes/README.md | 249 ++ docs/sdks/finetuning/README.md | 234 ++ docs/sdks/images/README.md | 155 ++ docs/sdks/models/README.md | 126 + docs/sdks/moderations/README.md | 50 + docs/sdks/openai/README.md | 1260 --------- files.gen | 173 +- gen.yaml | 8 +- package-lock.json | 4 +- package.json | 7 +- src/sdk/audio.ts | 199 ++ src/sdk/chat.ts | 111 + src/sdk/completions.ts | 110 + src/sdk/edits.ts | 112 + src/sdk/embeddings.ts | 110 + src/sdk/files.ts | 383 +++ src/sdk/finetunes.ts | 405 +++ src/sdk/finetuning.ts | 428 ++++ src/sdk/images.ts | 287 +++ src/sdk/models.ts | 228 ++ .../models/shared/createembeddingrequest.ts | 38 + src/sdk/models/shared/createfilerequest.ts | 37 + .../models/shared/createfinetunerequest.ts | 37 +- .../shared/createtranscriptionrequest.ts | 83 + .../models/shared/createtranslationrequest.ts | 60 + src/sdk/models/shared/index.ts | 4 + src/sdk/moderations.ts | 110 + src/sdk/openai.ts | 2247 ----------------- src/sdk/sdk.ts | 74 +- 48 files changed, 4466 insertions(+), 3645 deletions(-) create mode 100755 docs/models/shared/createembeddingrequest.md create mode 100755 docs/models/shared/createfilerequest.md create mode 100755 docs/models/shared/createfilerequestfile.md create mode 100755 docs/models/shared/createfinetunerequesthyperparameters.md create mode 100755 docs/models/shared/createtranscriptionrequest.md create mode 100755 docs/models/shared/createtranscriptionrequestfile.md create mode 100755 docs/models/shared/createtranscriptionrequestresponseformat.md create mode 100755 docs/models/shared/createtranslationrequest.md create mode 100755 docs/models/shared/createtranslationrequestfile.md create mode 100755 docs/sdks/audio/README.md create mode 100755 docs/sdks/chat/README.md create mode 100755 docs/sdks/completions/README.md create mode 100755 
docs/sdks/edits/README.md create mode 100755 docs/sdks/embeddings/README.md create mode 100755 docs/sdks/files/README.md create mode 100755 docs/sdks/finetunes/README.md create mode 100755 docs/sdks/finetuning/README.md create mode 100755 docs/sdks/images/README.md create mode 100755 docs/sdks/models/README.md create mode 100755 docs/sdks/moderations/README.md delete mode 100755 docs/sdks/openai/README.md create mode 100755 src/sdk/audio.ts create mode 100755 src/sdk/chat.ts create mode 100755 src/sdk/completions.ts create mode 100755 src/sdk/edits.ts create mode 100755 src/sdk/embeddings.ts create mode 100755 src/sdk/files.ts create mode 100755 src/sdk/finetunes.ts create mode 100755 src/sdk/finetuning.ts create mode 100755 src/sdk/images.ts create mode 100755 src/sdk/models.ts create mode 100755 src/sdk/models/shared/createembeddingrequest.ts create mode 100755 src/sdk/models/shared/createfilerequest.ts create mode 100755 src/sdk/models/shared/createtranscriptionrequest.ts create mode 100755 src/sdk/models/shared/createtranslationrequest.ts create mode 100755 src/sdk/moderations.ts delete mode 100755 src/sdk/openai.ts diff --git a/README.md b/README.md index 62d796a..4f3f49e 100755 --- a/README.md +++ b/README.md @@ -41,6 +41,7 @@ Authorization: Bearer YOUR_API_KEY ```typescript import { Gpt } from "@speakeasy-api/openai"; +import { CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/dist/sdk/models/shared"; (async() => { const sdk = new Gpt({ @@ -49,8 +50,12 @@ import { Gpt } from "@speakeasy-api/openai"; }, }); - const res = await sdk.openAI.cancelFineTune({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + const res = await sdk.audio.createTranscription({ + file: { + content: "\#BbTW'zX9" as bytes <<<>>>, + file: "Buckinghamshire", + }, + model: "whisper-1", }); if (res.statusCode == 200) { @@ -64,59 +69,89 @@ import { Gpt } from "@speakeasy-api/openai"; ## Available Resources and Operations -### [openAI](docs/sdks/openai/README.md) +### 
[audio](docs/sdks/audio/README.md) -* [~~cancelFineTune~~](docs/sdks/openai/README.md#cancelfinetune) - Immediately cancel a fine-tune job. - :warning: **Deprecated** -* [cancelFineTuningJob](docs/sdks/openai/README.md#cancelfinetuningjob) - Immediately cancel a fine-tune job. +* [createTranscription](docs/sdks/audio/README.md#createtranscription) - Transcribes audio into the input language. +* [createTranslation](docs/sdks/audio/README.md#createtranslation) - Translates audio into English. + +### [chat](docs/sdks/chat/README.md) + +* [createChatCompletion](docs/sdks/chat/README.md#createchatcompletion) - Creates a model response for the given chat conversation. + +### [completions](docs/sdks/completions/README.md) + +* [createCompletion](docs/sdks/completions/README.md#createcompletion) - Creates a completion for the provided prompt and parameters. + +### [edits](docs/sdks/edits/README.md) + +* [~~createEdit~~](docs/sdks/edits/README.md#createedit) - Creates a new edit for the provided input, instruction, and parameters. :warning: **Deprecated** + +### [embeddings](docs/sdks/embeddings/README.md) -* [createChatCompletion](docs/sdks/openai/README.md#createchatcompletion) - Creates a model response for the given chat conversation. -* [createCompletion](docs/sdks/openai/README.md#createcompletion) - Creates a completion for the provided prompt and parameters. -* [~~createEdit~~](docs/sdks/openai/README.md#createedit) - Creates a new edit for the provided input, instruction, and parameters. :warning: **Deprecated** -* [createEmbedding](docs/sdks/openai/README.md#createembedding) - Creates an embedding vector representing the input text. -* [createFile](docs/sdks/openai/README.md#createfile) - Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. 
+* [createEmbedding](docs/sdks/embeddings/README.md#createembedding) - Creates an embedding vector representing the input text. -* [~~createFineTune~~](docs/sdks/openai/README.md#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. +### [files](docs/sdks/files/README.md) + +* [createFile](docs/sdks/files/README.md#createfile) - Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. + +* [deleteFile](docs/sdks/files/README.md#deletefile) - Delete a file. +* [downloadFile](docs/sdks/files/README.md#downloadfile) - Returns the contents of the specified file. +* [listFiles](docs/sdks/files/README.md#listfiles) - Returns a list of files that belong to the user's organization. +* [retrieveFile](docs/sdks/files/README.md#retrievefile) - Returns information about a specific file. + +### [fineTunes](docs/sdks/finetunes/README.md) + +* [~~cancelFineTune~~](docs/sdks/finetunes/README.md#cancelfinetune) - Immediately cancel a fine-tune job. + :warning: **Deprecated** +* [~~createFineTune~~](docs/sdks/finetunes/README.md#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) :warning: **Deprecated** -* [createFineTuningJob](docs/sdks/openai/README.md#createfinetuningjob) - Creates a job that fine-tunes a specified model from a given dataset. +* [~~listFineTuneEvents~~](docs/sdks/finetunes/README.md#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. 
+ :warning: **Deprecated** +* [~~listFineTunes~~](docs/sdks/finetunes/README.md#listfinetunes) - List your organization's fine-tuning jobs + :warning: **Deprecated** +* [~~retrieveFineTune~~](docs/sdks/finetunes/README.md#retrievefinetune) - Gets info about the fine-tune job. + +[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + :warning: **Deprecated** + +### [fineTuning](docs/sdks/finetuning/README.md) + +* [cancelFineTuningJob](docs/sdks/finetuning/README.md#cancelfinetuningjob) - Immediately cancel a fine-tune job. + +* [createFineTuningJob](docs/sdks/finetuning/README.md#createfinetuningjob) - Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about fine-tuning](/docs/guides/fine-tuning) -* [createImage](docs/sdks/openai/README.md#createimage) - Creates an image given a prompt. -* [createImageEdit](docs/sdks/openai/README.md#createimageedit) - Creates an edited or extended image given an original image and a prompt. -* [createImageVariation](docs/sdks/openai/README.md#createimagevariation) - Creates a variation of a given image. -* [createModeration](docs/sdks/openai/README.md#createmoderation) - Classifies if text violates OpenAI's Content Policy -* [createTranscription](docs/sdks/openai/README.md#createtranscription) - Transcribes audio into the input language. -* [createTranslation](docs/sdks/openai/README.md#createtranslation) - Translates audio into English. -* [deleteFile](docs/sdks/openai/README.md#deletefile) - Delete a file. -* [deleteModel](docs/sdks/openai/README.md#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. -* [downloadFile](docs/sdks/openai/README.md#downloadfile) - Returns the contents of the specified file. 
-* [listFiles](docs/sdks/openai/README.md#listfiles) - Returns a list of files that belong to the user's organization. -* [~~listFineTuneEvents~~](docs/sdks/openai/README.md#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. - :warning: **Deprecated** -* [~~listFineTunes~~](docs/sdks/openai/README.md#listfinetunes) - List your organization's fine-tuning jobs - :warning: **Deprecated** -* [listFineTuningEvents](docs/sdks/openai/README.md#listfinetuningevents) - Get status updates for a fine-tuning job. - -* [listModels](docs/sdks/openai/README.md#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. -* [listPaginatedFineTuningJobs](docs/sdks/openai/README.md#listpaginatedfinetuningjobs) - List your organization's fine-tuning jobs +* [listFineTuningEvents](docs/sdks/finetuning/README.md#listfinetuningevents) - Get status updates for a fine-tuning job. -* [retrieveFile](docs/sdks/openai/README.md#retrievefile) - Returns information about a specific file. -* [~~retrieveFineTune~~](docs/sdks/openai/README.md#retrievefinetune) - Gets info about the fine-tune job. +* [listPaginatedFineTuningJobs](docs/sdks/finetuning/README.md#listpaginatedfinetuningjobs) - List your organization's fine-tuning jobs -[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - :warning: **Deprecated** -* [retrieveFineTuningJob](docs/sdks/openai/README.md#retrievefinetuningjob) - Get info about a fine-tuning job. +* [retrieveFineTuningJob](docs/sdks/finetuning/README.md#retrievefinetuningjob) - Get info about a fine-tuning job. [Learn more about fine-tuning](/docs/guides/fine-tuning) -* [retrieveModel](docs/sdks/openai/README.md#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
+ +### [images](docs/sdks/images/README.md) + +* [createImage](docs/sdks/images/README.md#createimage) - Creates an image given a prompt. +* [createImageEdit](docs/sdks/images/README.md#createimageedit) - Creates an edited or extended image given an original image and a prompt. +* [createImageVariation](docs/sdks/images/README.md#createimagevariation) - Creates a variation of a given image. + +### [models](docs/sdks/models/README.md) + +* [deleteModel](docs/sdks/models/README.md#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. +* [listModels](docs/sdks/models/README.md#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. +* [retrieveModel](docs/sdks/models/README.md#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + +### [moderations](docs/sdks/moderations/README.md) + +* [createModeration](docs/sdks/moderations/README.md#createmoderation) - Classifies if text violates OpenAI's Content Policy diff --git a/RELEASES.md b/RELEASES.md index fd23472..fe536d0 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -700,4 +700,14 @@ Based on: ### Generated - [typescript v2.26.0] . ### Releases -- [NPM v2.26.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.26.0 - . \ No newline at end of file +- [NPM v2.26.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.26.0 - . + +## 2023-10-11 00:54:51 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.97.1 (2.152.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.26.1] . +### Releases +- [NPM v2.26.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.26.1 - . 
\ No newline at end of file diff --git a/USAGE.md b/USAGE.md index f4ab472..f9ff825 100755 --- a/USAGE.md +++ b/USAGE.md @@ -3,6 +3,7 @@ ```typescript import { Gpt } from "@speakeasy-api/openai"; +import { CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/dist/sdk/models/shared"; (async() => { const sdk = new Gpt({ @@ -11,8 +12,12 @@ import { Gpt } from "@speakeasy-api/openai"; }, }); - const res = await sdk.openAI.cancelFineTune({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + const res = await sdk.audio.createTranscription({ + file: { + content: "\#BbTW'zX9" as bytes <<<>>>, + file: "Buckinghamshire", + }, + model: "whisper-1", }); if (res.statusCode == 200) { diff --git a/docs/models/shared/createembeddingrequest.md b/docs/models/shared/createembeddingrequest.md new file mode 100755 index 0000000..b780407 --- /dev/null +++ b/docs/models/shared/createembeddingrequest.md @@ -0,0 +1,10 @@ +# CreateEmbeddingRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `input` | *any* | :heavy_check_mark: | Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
| The quick brown fox jumped over the lazy dog | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| text-embedding-ada-002 | +| `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createfilerequest.md b/docs/models/shared/createfilerequest.md new file mode 100755 index 0000000..b31af0f --- /dev/null +++ b/docs/models/shared/createfilerequest.md @@ -0,0 +1,9 @@ +# CreateFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [CreateFileRequestFile](../../models/shared/createfilerequestfile.md) | :heavy_check_mark: | The file object (not file name) to be uploaded.

If the `purpose` is set to "fine-tune", the file will be used for fine-tuning.
| +| `purpose` | *string* | :heavy_check_mark: | The intended purpose of the uploaded file.

Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file is correct for fine-tuning.
| \ No newline at end of file diff --git a/docs/models/shared/createfilerequestfile.md b/docs/models/shared/createfilerequestfile.md new file mode 100755 index 0000000..3bb80b5 --- /dev/null +++ b/docs/models/shared/createfilerequestfile.md @@ -0,0 +1,9 @@ +# CreateFileRequestFile + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `content` | *Uint8Array* | :heavy_check_mark: | N/A | +| `file` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createfinetunerequest.md b/docs/models/shared/createfinetunerequest.md index df76c1b..a6deb87 100755 --- a/docs/models/shared/createfinetunerequest.md +++ b/docs/models/shared/createfinetunerequest.md @@ -10,9 +10,9 @@ | `classificationNClasses` | *number* | :heavy_minus_sign: | The number of classes in a classification task.

This parameter is required for multiclass classification.
| | | `classificationPositiveClass` | *string* | :heavy_minus_sign: | The positive class in binary classification.

This parameter is needed to generate precision, recall, and F1
metrics when doing binary classification.
| | | `computeClassificationMetrics` | *boolean* | :heavy_minus_sign: | If set, we calculate classification-specific metrics such as accuracy
and F-1 score using the validation set at the end of every epoch.
These metrics can be viewed in the [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model).

In order to compute classification metrics, you must provide a
`validation_file`. Additionally, you must
specify `classification_n_classes` for multiclass classification or
`classification_positive_class` for binary classification.
| | +| `hyperparameters` | [CreateFineTuneRequestHyperparameters](../../models/shared/createfinetunerequesthyperparameters.md) | :heavy_minus_sign: | The hyperparameters used for the fine-tuning job. | | | `learningRateMultiplier` | *number* | :heavy_minus_sign: | The learning rate multiplier to use for training.
The fine-tuning learning rate is the original learning rate used for
pretraining multiplied by this value.

By default, the learning rate multiplier is the 0.05, 0.1, or 0.2
depending on final `batch_size` (larger learning rates tend to
perform better with larger batch sizes). We recommend experimenting
with values in the range 0.02 to 0.2 to see what produces the best
results.
| | | `model` | *any* | :heavy_minus_sign: | The name of the base model to fine-tune. You can select one of "ada",
"babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22.
To learn more about these models, see the
[Models](/docs/models) documentation.
| curie | -| `nEpochs` | *number* | :heavy_minus_sign: | The number of epochs to train the model for. An epoch refers to one
full cycle through the training dataset.
| | | `promptLossWeight` | *number* | :heavy_minus_sign: | The weight to use for loss on the prompt tokens. This controls how
much the model tries to learn to generate the prompt (as compared
to the completion which always has a weight of 1.0), and can add
a stabilizing effect to training when completions are short.

If prompts are extremely long (relative to completions), it may make
sense to reduce this weight so as to avoid over-prioritizing
learning the prompt.
| | | `suffix` | *string* | :heavy_minus_sign: | A string of up to 40 characters that will be added to your fine-tuned model name.

For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
| | | `trainingFile` | *string* | :heavy_check_mark: | The ID of an uploaded file that contains training data.

See [upload file](/docs/api-reference/files/upload) for how to upload a file.

Your dataset must be formatted as a JSONL file, where each training
example is a JSON object with the keys "prompt" and "completion".
Additionally, you must upload your file with the purpose `fine-tune`.

See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details.
| file-abc123 | diff --git a/docs/models/shared/createfinetunerequesthyperparameters.md b/docs/models/shared/createfinetunerequesthyperparameters.md new file mode 100755 index 0000000..004fcdc --- /dev/null +++ b/docs/models/shared/createfinetunerequesthyperparameters.md @@ -0,0 +1,10 @@ +# CreateFineTuneRequestHyperparameters + +The hyperparameters used for the fine-tuning job. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `nEpochs` | *any* | :heavy_minus_sign: | The number of epochs to train the model for. An epoch refers to one
full cycle through the training dataset.
| \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequest.md b/docs/models/shared/createtranscriptionrequest.md new file mode 100755 index 0000000..2f4e56a --- /dev/null +++ b/docs/models/shared/createtranscriptionrequest.md @@ -0,0 +1,13 @@ +# CreateTranscriptionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [CreateTranscriptionRequestFile](../../models/shared/createtranscriptionrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
| | +| `language` | *string* | :heavy_minus_sign: | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
| | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| whisper-1 | +| `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
| | +| `responseFormat` | [CreateTranscriptionRequestResponseFormat](../../models/shared/createtranscriptionrequestresponseformat.md) | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| | +| `temperature` | *number* | :heavy_minus_sign: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
| | \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequestfile.md b/docs/models/shared/createtranscriptionrequestfile.md new file mode 100755 index 0000000..76b878c --- /dev/null +++ b/docs/models/shared/createtranscriptionrequestfile.md @@ -0,0 +1,9 @@ +# CreateTranscriptionRequestFile + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `content` | *Uint8Array* | :heavy_check_mark: | N/A | +| `file` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shared/createtranscriptionrequestresponseformat.md b/docs/models/shared/createtranscriptionrequestresponseformat.md new file mode 100755 index 0000000..13488d6 --- /dev/null +++ b/docs/models/shared/createtranscriptionrequestresponseformat.md @@ -0,0 +1,15 @@ +# CreateTranscriptionRequestResponseFormat + +The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. 
+ + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `Json` | json | +| `Text` | text | +| `Srt` | srt | +| `VerboseJson` | verbose_json | +| `Vtt` | vtt | \ No newline at end of file diff --git a/docs/models/shared/createtranslationrequest.md b/docs/models/shared/createtranslationrequest.md new file mode 100755 index 0000000..a8274c4 --- /dev/null +++ b/docs/models/shared/createtranslationrequest.md @@ -0,0 +1,12 @@ +# CreateTranslationRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [CreateTranslationRequestFile](../../models/shared/createtranslationrequestfile.md) | :heavy_check_mark: | The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
| | +| `model` | *any* | :heavy_check_mark: | ID of the model to use. Only `whisper-1` is currently available.
| whisper-1 | +| `prompt` | *string* | :heavy_minus_sign: | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
| | +| `responseFormat` | *string* | :heavy_minus_sign: | The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
| | +| `temperature` | *number* | :heavy_minus_sign: | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
| | \ No newline at end of file diff --git a/docs/models/shared/createtranslationrequestfile.md b/docs/models/shared/createtranslationrequestfile.md new file mode 100755 index 0000000..f143930 --- /dev/null +++ b/docs/models/shared/createtranslationrequestfile.md @@ -0,0 +1,9 @@ +# CreateTranslationRequestFile + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `content` | *Uint8Array* | :heavy_check_mark: | N/A | +| `file` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/sdks/audio/README.md b/docs/sdks/audio/README.md new file mode 100755 index 0000000..0241115 --- /dev/null +++ b/docs/sdks/audio/README.md @@ -0,0 +1,98 @@ +# Audio +(*audio*) + +## Overview + +Learn how to turn audio into text. + +### Available Operations + +* [createTranscription](#createtranscription) - Transcribes audio into the input language. +* [createTranslation](#createtranslation) - Translates audio into English. + +## createTranscription + +Transcribes audio into the input language. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/dist/sdk/models/shared"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.audio.createTranscription({ + file: { + content: "\#BbTW'zX9" as bytes <<<>>>, + file: "Buckinghamshire", + }, + model: "whisper-1", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `request` | [shared.CreateTranscriptionRequest](../../models/shared/createtranscriptionrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateTranscriptionResponse](../../models/operations/createtranscriptionresponse.md)>** + + +## createTranslation + +Translates audio into English. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.audio.createTranslation({ + file: { + content: "M57UL;W3rx" as bytes <<<>>>, + file: "Reggae Toys silver", + }, + model: "whisper-1", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `request` | [shared.CreateTranslationRequest](../../models/shared/createtranslationrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateTranslationResponse](../../models/operations/createtranslationresponse.md)>** + diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md new file mode 100755 index 0000000..b0eed08 --- /dev/null +++ b/docs/sdks/chat/README.md @@ -0,0 +1,77 @@ +# Chat +(*chat*) + +## Overview + +Given a list of messages comprising a conversation, the model will return a response. + +### Available Operations + +* [createChatCompletion](#createchatcompletion) - Creates a model response for the given chat conversation. + +## createChatCompletion + +Creates a model response for the given chat conversation. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { ChatCompletionRequestMessageRole } from "@speakeasy-api/openai/dist/sdk/models/shared"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.chat.createChatCompletion({ + functionCall: "Hybrid", + functions: [ + { + name: "Hoboken reinvent Web", + parameters: { + "Southeast": "International", + }, + }, + ], + logitBias: { + "incidunt": 432116, + }, + messages: [ + { + content: "abbreviate", + functionCall: { + arguments: "Directives Chair", + name: "Northeast frictionless Park", + }, + role: ChatCompletionRequestMessageRole.Assistant, + }, + ], + model: "gpt-3.5-turbo", + n: 1, + stop: "Future", + temperature: 1, + topP: 1, + user: "user-1234", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `request` | [shared.CreateChatCompletionRequest](../../models/shared/createchatcompletionrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.CreateChatCompletionResponse](../../models/operations/createchatcompletionresponse.md)>** + diff --git a/docs/sdks/completions/README.md b/docs/sdks/completions/README.md new file mode 100755 index 0000000..bb78a63 --- /dev/null +++ b/docs/sdks/completions/README.md @@ -0,0 +1,60 @@ +# Completions +(*completions*) + +## Overview + +Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. + +### Available Operations + +* [createCompletion](#createcompletion) - Creates a completion for the provided prompt and parameters. + +## createCompletion + +Creates a completion for the provided prompt and parameters. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.completions.createCompletion({ + logitBias: { + "red": 242695, + }, + maxTokens: 16, + model: "Fresh", + n: 1, + prompt: "Reggae", + stop: "Fluorine", + suffix: "test.", + temperature: 1, + topP: 1, + user: "user-1234", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `request` | [shared.CreateCompletionRequest](../../models/shared/createcompletionrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.CreateCompletionResponse](../../models/operations/createcompletionresponse.md)>** + diff --git a/docs/sdks/edits/README.md b/docs/sdks/edits/README.md new file mode 100755 index 0000000..2d663c9 --- /dev/null +++ b/docs/sdks/edits/README.md @@ -0,0 +1,56 @@ +# Edits +(*edits*) + +## Overview + +Given a prompt and an instruction, the model will return an edited version of the prompt. + +### Available Operations + +* [~~createEdit~~](#createedit) - Creates a new edit for the provided input, instruction, and parameters. :warning: **Deprecated** + +## ~~createEdit~~ + +Creates a new edit for the provided input, instruction, and parameters. + +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.edits.createEdit({ + input: "What day of the wek is it?", + instruction: "Fix the spelling mistakes.", + model: "text-davinci-edit-001", + n: 1, + temperature: 1, + topP: 1, + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `request` | [shared.CreateEditRequest](../../models/shared/createeditrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.CreateEditResponse](../../models/operations/createeditresponse.md)>** + diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md new file mode 100755 index 0000000..d49752b --- /dev/null +++ b/docs/sdks/embeddings/README.md @@ -0,0 +1,51 @@ +# Embeddings +(*embeddings*) + +## Overview + +Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. + +### Available Operations + +* [createEmbedding](#createembedding) - Creates an embedding vector representing the input text. + +## createEmbedding + +Creates an embedding vector representing the input text. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.embeddings.createEmbedding({ + input: "The quick brown fox jumped over the lazy dog", + model: "text-embedding-ada-002", + user: "user-1234", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `request` | [shared.CreateEmbeddingRequest](../../models/shared/createembeddingrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.CreateEmbeddingResponse](../../models/operations/createembeddingresponse.md)>** + diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md new file mode 100755 index 0000000..254f81c --- /dev/null +++ b/docs/sdks/files/README.md @@ -0,0 +1,212 @@ +# Files +(*files*) + +## Overview + +Files are used to upload documents that can be used with features like fine-tuning. + +### Available Operations + +* [createFile](#createfile) - Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. + +* [deleteFile](#deletefile) - Delete a file. +* [downloadFile](#downloadfile) - Returns the contents of the specified file. +* [listFiles](#listfiles) - Returns a list of files that belong to the user's organization. +* [retrieveFile](#retrievefile) - Returns information about a specific file. + +## createFile + +Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. 
+ + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.files.createFile({ + file: { + content: "`'$Z`(L/RH" as bytes <<<>>>, + file: "Rap National", + }, + purpose: "Female synergistic Maine", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `request` | [shared.CreateFileRequest](../../models/shared/createfilerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateFileResponse](../../models/operations/createfileresponse.md)>** + + +## deleteFile + +Delete a file. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.files.deleteFile({ + fileId: "yellow kiddingly white", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `request` | [operations.DeleteFileRequest](../../models/operations/deletefilerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.DeleteFileResponse](../../models/operations/deletefileresponse.md)>** + + +## downloadFile + +Returns the contents of the specified file. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.files.downloadFile({ + fileId: "Maserati Bronze Audi", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `request` | [operations.DownloadFileRequest](../../models/operations/downloadfilerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.DownloadFileResponse](../../models/operations/downloadfileresponse.md)>** + + +## listFiles + +Returns a list of files that belong to the user's organization. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.files.listFiles(); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.ListFilesResponse](../../models/operations/listfilesresponse.md)>** + + +## retrieveFile + +Returns information about a specific file. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.files.retrieveFile({ + fileId: "online Facilitator enfold", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `request` | [operations.RetrieveFileRequest](../../models/operations/retrievefilerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.RetrieveFileResponse](../../models/operations/retrievefileresponse.md)>** + diff --git a/docs/sdks/finetunes/README.md b/docs/sdks/finetunes/README.md new file mode 100755 index 0000000..dfa4760 --- /dev/null +++ b/docs/sdks/finetunes/README.md @@ -0,0 +1,249 @@ +# FineTunes +(*fineTunes*) + +## Overview + +Manage legacy fine-tuning jobs to tailor a model to your specific training data. + +### Available Operations + +* [~~cancelFineTune~~](#cancelfinetune) - Immediately cancel a fine-tune job. + :warning: **Deprecated** +* [~~createFineTune~~](#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. 
+ +Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + +[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + :warning: **Deprecated** +* [~~listFineTuneEvents~~](#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. + :warning: **Deprecated** +* [~~listFineTunes~~](#listfinetunes) - List your organization's fine-tuning jobs + :warning: **Deprecated** +* [~~retrieveFineTune~~](#retrievefinetune) - Gets info about the fine-tune job. + +[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + :warning: **Deprecated** + +## ~~cancelFineTune~~ + +Immediately cancel a fine-tune job. + + +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.fineTunes.cancelFineTune({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `request` | [operations.CancelFineTuneRequest](../../models/operations/cancelfinetunerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.CancelFineTuneResponse](../../models/operations/cancelfinetuneresponse.md)>** + + +## ~~createFineTune~~ + +Creates a job that fine-tunes a specified model from a given dataset. + +Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + +[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + + +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.fineTunes.createFineTune({ + classificationBetas: [ + 0.6, + 1, + 1.5, + 2, + ], + hyperparameters: { + nEpochs: "plum", + }, + model: "curie", + trainingFile: "file-abc123", + validationFile: "file-abc123", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `request` | [shared.CreateFineTuneRequest](../../models/shared/createfinetunerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateFineTuneResponse](../../models/operations/createfinetuneresponse.md)>** + + +## ~~listFineTuneEvents~~ + +Get fine-grained status updates for a fine-tune job. 
+ + +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.fineTunes.listFineTuneEvents({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | +| `request` | [operations.ListFineTuneEventsRequest](../../models/operations/listfinetuneeventsrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.ListFineTuneEventsResponse](../../models/operations/listfinetuneeventsresponse.md)>** + + +## ~~listFineTunes~~ + +List your organization's fine-tuning jobs + + +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.fineTunes.listFineTunes(); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.ListFineTunesResponse](../../models/operations/listfinetunesresponse.md)>** + + +## ~~retrieveFineTune~~ + +Gets info about the fine-tune job. + +[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + + +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.fineTunes.retrieveFineTune({ + fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `request` | [operations.RetrieveFineTuneRequest](../../models/operations/retrievefinetunerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.RetrieveFineTuneResponse](../../models/operations/retrievefinetuneresponse.md)>** + diff --git a/docs/sdks/finetuning/README.md b/docs/sdks/finetuning/README.md new file mode 100755 index 0000000..3cbfb74 --- /dev/null +++ b/docs/sdks/finetuning/README.md @@ -0,0 +1,234 @@ +# FineTuning +(*fineTuning*) + +## Overview + +Manage fine-tuning jobs to tailor a model to your specific training data. + +### Available Operations + +* [cancelFineTuningJob](#cancelfinetuningjob) - Immediately cancel a fine-tune job. + +* [createFineTuningJob](#createfinetuningjob) - Creates a job that fine-tunes a specified model from a given dataset. + +Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
+ +[Learn more about fine-tuning](/docs/guides/fine-tuning) + +* [listFineTuningEvents](#listfinetuningevents) - Get status updates for a fine-tuning job. + +* [listPaginatedFineTuningJobs](#listpaginatedfinetuningjobs) - List your organization's fine-tuning jobs + +* [retrieveFineTuningJob](#retrievefinetuningjob) - Get info about a fine-tuning job. + +[Learn more about fine-tuning](/docs/guides/fine-tuning) + + +## cancelFineTuningJob + +Immediately cancel a fine-tune job. + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.fineTuning.cancelFineTuningJob({ + fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | +| `request` | [operations.CancelFineTuningJobRequest](../../models/operations/cancelfinetuningjobrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CancelFineTuningJobResponse](../../models/operations/cancelfinetuningjobresponse.md)>** + + +## createFineTuningJob + +Creates a job that fine-tunes a specified model from a given dataset. + +Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
+ +[Learn more about fine-tuning](/docs/guides/fine-tuning) + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.fineTuning.createFineTuningJob({ + hyperparameters: { + nEpochs: "empower", + }, + model: "gpt-3.5-turbo", + trainingFile: "file-abc123", + validationFile: "file-abc123", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `request` | [shared.CreateFineTuningJobRequest](../../models/shared/createfinetuningjobrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateFineTuningJobResponse](../../models/operations/createfinetuningjobresponse.md)>** + + +## listFineTuningEvents + +Get status updates for a fine-tuning job. 
+ + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.fineTuning.listFineTuningEvents({ + fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `request` | [operations.ListFineTuningEventsRequest](../../models/operations/listfinetuningeventsrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.ListFineTuningEventsResponse](../../models/operations/listfinetuningeventsresponse.md)>** + + +## listPaginatedFineTuningJobs + +List your organization's fine-tuning jobs + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.fineTuning.listPaginatedFineTuningJobs({}); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| `request` | [operations.ListPaginatedFineTuningJobsRequest](../../models/operations/listpaginatedfinetuningjobsrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.ListPaginatedFineTuningJobsResponse](../../models/operations/listpaginatedfinetuningjobsresponse.md)>** + + +## retrieveFineTuningJob + +Get info about a fine-tuning job. 
+ +[Learn more about fine-tuning](/docs/guides/fine-tuning) + + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.fineTuning.retrieveFineTuningJob({ + fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `request` | [operations.RetrieveFineTuningJobRequest](../../models/operations/retrievefinetuningjobrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.RetrieveFineTuningJobResponse](../../models/operations/retrievefinetuningjobresponse.md)>** + diff --git a/docs/sdks/images/README.md b/docs/sdks/images/README.md new file mode 100755 index 0000000..0f3d3b1 --- /dev/null +++ b/docs/sdks/images/README.md @@ -0,0 +1,155 @@ +# Images +(*images*) + +## Overview + +Given a prompt and/or an input image, the model will generate a new image. + +### Available Operations + +* [createImage](#createimage) - Creates an image given a prompt. +* [createImageEdit](#createimageedit) - Creates an edited or extended image given an original image and a prompt. +* [createImageVariation](#createimagevariation) - Creates a variation of a given image. 
+ +## createImage + +Creates an image given a prompt. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateImageRequestResponseFormat, CreateImageRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.images.createImage({ + n: 1, + prompt: "A cute baby sea otter", + responseFormat: CreateImageRequestResponseFormat.Url, + size: CreateImageRequestSize.OneThousandAndTwentyFourx1024, + user: "user-1234", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `request` | [shared.CreateImageRequest](../../models/shared/createimagerequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateImageResponse](../../models/operations/createimageresponse.md)>** + + +## createImageEdit + +Creates an edited or extended image given an original image and a prompt. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateImageEditRequestResponseFormat, CreateImageEditRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.images.createImageEdit({ + image: { + content: "0]/(|3W_T9" as bytes <<<>>>, + image: "https://loremflickr.com/640/480", + }, + mask: { + content: "`^YjrpxopK" as bytes <<<>>>, + mask: "Rap Dodge Incredible", + }, + n: 1, + prompt: "A cute baby sea otter wearing a beret", + responseFormat: CreateImageEditRequestResponseFormat.Url, + size: CreateImageEditRequestSize.OneThousandAndTwentyFourx1024, + user: "user-1234", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `request` | [shared.CreateImageEditRequest](../../models/shared/createimageeditrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateImageEditResponse](../../models/operations/createimageeditresponse.md)>** + + +## createImageVariation + +Creates a variation of a given image. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; +import { CreateImageVariationRequestResponseFormat, CreateImageVariationRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.images.createImageVariation({ + image: { + content: "`YY7PCrWuK" as bytes <<<>>>, + image: "https://loremflickr.com/640/480", + }, + n: 1, + responseFormat: CreateImageVariationRequestResponseFormat.Url, + size: CreateImageVariationRequestSize.OneThousandAndTwentyFourx1024, + user: "user-1234", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `request` | [shared.CreateImageVariationRequest](../../models/shared/createimagevariationrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.CreateImageVariationResponse](../../models/operations/createimagevariationresponse.md)>** + diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md new file mode 100755 index 0000000..3db8acc --- /dev/null +++ b/docs/sdks/models/README.md @@ -0,0 +1,126 @@ +# Models +(*models*) + +## Overview + +List and describe the various models available in the API. + +### Available Operations + +* [deleteModel](#deletemodel) - Delete a fine-tuned model. 
You must have the Owner role in your organization to delete a model. +* [listModels](#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. +* [retrieveModel](#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + +## deleteModel + +Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.models.deleteModel({ + model: "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `request` | [operations.DeleteModelRequest](../../models/operations/deletemodelrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.DeleteModelResponse](../../models/operations/deletemodelresponse.md)>** + + +## listModels + +Lists the currently available models, and provides basic information about each one such as the owner and availability. 
+ +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.models.listModels(); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.ListModelsResponse](../../models/operations/listmodelsresponse.md)>** + + +## retrieveModel + +Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.models.retrieveModel({ + model: "gpt-3.5-turbo", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `request` | [operations.RetrieveModelRequest](../../models/operations/retrievemodelrequest.md) | :heavy_check_mark: | The request object to use for the request. 
| +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | + + +### Response + +**Promise<[operations.RetrieveModelResponse](../../models/operations/retrievemodelresponse.md)>** + diff --git a/docs/sdks/moderations/README.md b/docs/sdks/moderations/README.md new file mode 100755 index 0000000..699ca4d --- /dev/null +++ b/docs/sdks/moderations/README.md @@ -0,0 +1,50 @@ +# Moderations +(*moderations*) + +## Overview + +Given a input text, outputs if the model classifies it as violating OpenAI's content policy. + +### Available Operations + +* [createModeration](#createmoderation) - Classifies if text violates OpenAI's Content Policy + +## createModeration + +Classifies if text violates OpenAI's Content Policy + +### Example Usage + +```typescript +import { Gpt } from "@speakeasy-api/openai"; + +(async() => { + const sdk = new Gpt({ + security: { + apiKeyAuth: "", + }, + }); + + const res = await sdk.moderations.createModeration({ + input: "stable", + model: "text-moderation-stable", + }); + + if (res.statusCode == 200) { + // handle response + } +})(); +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `request` | [shared.CreateModerationRequest](../../models/shared/createmoderationrequest.md) | :heavy_check_mark: | The request object to use for the request. | +| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| + + +### Response + +**Promise<[operations.CreateModerationResponse](../../models/operations/createmoderationresponse.md)>** + diff --git a/docs/sdks/openai/README.md b/docs/sdks/openai/README.md deleted file mode 100755 index feb8479..0000000 --- a/docs/sdks/openai/README.md +++ /dev/null @@ -1,1260 +0,0 @@ -# OpenAI -(*openAI*) - -## Overview - -The OpenAI REST API - -### Available Operations - -* [~~cancelFineTune~~](#cancelfinetune) - Immediately cancel a fine-tune job. - :warning: **Deprecated** -* [cancelFineTuningJob](#cancelfinetuningjob) - Immediately cancel a fine-tune job. - -* [createChatCompletion](#createchatcompletion) - Creates a model response for the given chat conversation. -* [createCompletion](#createcompletion) - Creates a completion for the provided prompt and parameters. -* [~~createEdit~~](#createedit) - Creates a new edit for the provided input, instruction, and parameters. :warning: **Deprecated** -* [createEmbedding](#createembedding) - Creates an embedding vector representing the input text. -* [createFile](#createfile) - Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. - -* [~~createFineTune~~](#createfinetune) - Creates a job that fine-tunes a specified model from a given dataset. - -Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - -[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - :warning: **Deprecated** -* [createFineTuningJob](#createfinetuningjob) - Creates a job that fine-tunes a specified model from a given dataset. - -Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
- -[Learn more about fine-tuning](/docs/guides/fine-tuning) - -* [createImage](#createimage) - Creates an image given a prompt. -* [createImageEdit](#createimageedit) - Creates an edited or extended image given an original image and a prompt. -* [createImageVariation](#createimagevariation) - Creates a variation of a given image. -* [createModeration](#createmoderation) - Classifies if text violates OpenAI's Content Policy -* [createTranscription](#createtranscription) - Transcribes audio into the input language. -* [createTranslation](#createtranslation) - Translates audio into English. -* [deleteFile](#deletefile) - Delete a file. -* [deleteModel](#deletemodel) - Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. -* [downloadFile](#downloadfile) - Returns the contents of the specified file. -* [listFiles](#listfiles) - Returns a list of files that belong to the user's organization. -* [~~listFineTuneEvents~~](#listfinetuneevents) - Get fine-grained status updates for a fine-tune job. - :warning: **Deprecated** -* [~~listFineTunes~~](#listfinetunes) - List your organization's fine-tuning jobs - :warning: **Deprecated** -* [listFineTuningEvents](#listfinetuningevents) - Get status updates for a fine-tuning job. - -* [listModels](#listmodels) - Lists the currently available models, and provides basic information about each one such as the owner and availability. -* [listPaginatedFineTuningJobs](#listpaginatedfinetuningjobs) - List your organization's fine-tuning jobs - -* [retrieveFile](#retrievefile) - Returns information about a specific file. -* [~~retrieveFineTune~~](#retrievefinetune) - Gets info about the fine-tune job. - -[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - :warning: **Deprecated** -* [retrieveFineTuningJob](#retrievefinetuningjob) - Get info about a fine-tuning job. 
- -[Learn more about fine-tuning](/docs/guides/fine-tuning) - -* [retrieveModel](#retrievemodel) - Retrieves a model instance, providing basic information about the model such as the owner and permissioning. - -## ~~cancelFineTune~~ - -Immediately cancel a fine-tune job. - - -> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.cancelFineTune({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `request` | [operations.CancelFineTuneRequest](../../models/operations/cancelfinetunerequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CancelFineTuneResponse](../../models/operations/cancelfinetuneresponse.md)>** - - -## cancelFineTuningJob - -Immediately cancel a fine-tune job. 
- - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.cancelFineTuningJob({ - fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | -| `request` | [operations.CancelFineTuningJobRequest](../../models/operations/cancelfinetuningjobrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CancelFineTuningJobResponse](../../models/operations/cancelfinetuningjobresponse.md)>** - - -## createChatCompletion - -Creates a model response for the given chat conversation. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { ChatCompletionRequestMessageRole } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createChatCompletion({ - functionCall: "Hybrid", - functions: [ - { - name: "Hoboken reinvent Web", - parameters: { - "Southeast": "International", - }, - }, - ], - logitBias: { - "incidunt": 432116, - }, - messages: [ - { - content: "abbreviate", - functionCall: { - arguments: "Directives Chair", - name: "Northeast frictionless Park", - }, - role: ChatCompletionRequestMessageRole.Assistant, - }, - ], - model: "gpt-3.5-turbo", - n: 1, - stop: "Future", - temperature: 1, - topP: 1, - user: "user-1234", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `request` | [shared.CreateChatCompletionRequest](../../models/shared/createchatcompletionrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateChatCompletionResponse](../../models/operations/createchatcompletionresponse.md)>** - - -## createCompletion - -Creates a completion for the provided prompt and parameters. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createCompletion({ - logitBias: { - "red": 242695, - }, - maxTokens: 16, - model: "Fresh", - n: 1, - prompt: "Reggae", - stop: "Fluorine", - suffix: "test.", - temperature: 1, - topP: 1, - user: "user-1234", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `request` | [shared.CreateCompletionRequest](../../models/shared/createcompletionrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateCompletionResponse](../../models/operations/createcompletionresponse.md)>** - - -## ~~createEdit~~ - -Creates a new edit for the provided input, instruction, and parameters. - -> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createEdit({ - input: "What day of the wek is it?", - instruction: "Fix the spelling mistakes.", - model: "text-davinci-edit-001", - n: 1, - temperature: 1, - topP: 1, - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `request` | [shared.CreateEditRequest](../../models/shared/createeditrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateEditResponse](../../models/operations/createeditresponse.md)>** - - -## createEmbedding - -Creates an embedding vector representing the input text. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createEmbedding({ - "chief": "compressing", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `request` | [Record](../../models//.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateEmbeddingResponse](../../models/operations/createembeddingresponse.md)>** - - -## createFile - -Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. 
- - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createFile({ - "Associate": "Miami", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `request` | [Record](../../models//.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateFileResponse](../../models/operations/createfileresponse.md)>** - - -## ~~createFineTune~~ - -Creates a job that fine-tunes a specified model from a given dataset. - -Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - -[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - - -> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createFineTune({ - classificationBetas: [ - 0.6, - 1, - 1.5, - 2, - ], - model: "curie", - trainingFile: "file-abc123", - validationFile: "file-abc123", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `request` | [shared.CreateFineTuneRequest](../../models/shared/createfinetunerequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateFineTuneResponse](../../models/operations/createfinetuneresponse.md)>** - - -## createFineTuningJob - -Creates a job that fine-tunes a specified model from a given dataset. - -Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
- -[Learn more about fine-tuning](/docs/guides/fine-tuning) - - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createFineTuningJob({ - hyperparameters: { - nEpochs: "empower", - }, - model: "gpt-3.5-turbo", - trainingFile: "file-abc123", - validationFile: "file-abc123", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `request` | [shared.CreateFineTuningJobRequest](../../models/shared/createfinetuningjobrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateFineTuningJobResponse](../../models/operations/createfinetuningjobresponse.md)>** - - -## createImage - -Creates an image given a prompt. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateImageRequestResponseFormat, CreateImageRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createImage({ - n: 1, - prompt: "A cute baby sea otter", - responseFormat: CreateImageRequestResponseFormat.Url, - size: CreateImageRequestSize.OneThousandAndTwentyFourx1024, - user: "user-1234", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `request` | [shared.CreateImageRequest](../../models/shared/createimagerequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateImageResponse](../../models/operations/createimageresponse.md)>** - - -## createImageEdit - -Creates an edited or extended image given an original image and a prompt. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateImageEditRequestResponseFormat, CreateImageEditRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createImageEdit({ - image: { - content: new TextEncoder().encode("0]/(|3W_T9"), - image: "https://loremflickr.com/640/480", - }, - mask: { - content: new TextEncoder().encode("`^YjrpxopK"), - mask: "Rap Dodge Incredible", - }, - n: 1, - prompt: "A cute baby sea otter wearing a beret", - responseFormat: CreateImageEditRequestResponseFormat.Url, - size: CreateImageEditRequestSize.OneThousandAndTwentyFourx1024, - user: "user-1234", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `request` | [shared.CreateImageEditRequest](../../models/shared/createimageeditrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateImageEditResponse](../../models/operations/createimageeditresponse.md)>** - - -## createImageVariation - -Creates a variation of a given image. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; -import { CreateImageVariationRequestResponseFormat, CreateImageVariationRequestSize } from "@speakeasy-api/openai/dist/sdk/models/shared"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createImageVariation({ - image: { - content: new TextEncoder().encode("`YY7PCrWuK"), - image: "https://loremflickr.com/640/480", - }, - n: 1, - responseFormat: CreateImageVariationRequestResponseFormat.Url, - size: CreateImageVariationRequestSize.OneThousandAndTwentyFourx1024, - user: "user-1234", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `request` | [shared.CreateImageVariationRequest](../../models/shared/createimagevariationrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| - - -### Response - -**Promise<[operations.CreateImageVariationResponse](../../models/operations/createimagevariationresponse.md)>** - - -## createModeration - -Classifies if text violates OpenAI's Content Policy - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createModeration({ - input: "stable", - model: "text-moderation-stable", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `request` | [shared.CreateModerationRequest](../../models/shared/createmoderationrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateModerationResponse](../../models/operations/createmoderationresponse.md)>** - - -## createTranscription - -Transcribes audio into the input language. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createTranscription({ - "Lead": "neutral", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `request` | [Record](../../models//.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.CreateTranscriptionResponse](../../models/operations/createtranscriptionresponse.md)>** - - -## createTranslation - -Translates audio into English. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.createTranslation({ - "DRAM": "Granite", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `request` | [Record](../../models//.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| - - -### Response - -**Promise<[operations.CreateTranslationResponse](../../models/operations/createtranslationresponse.md)>** - - -## deleteFile - -Delete a file. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.deleteFile({ - fileId: "yellow kiddingly white", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `request` | [operations.DeleteFileRequest](../../models/operations/deletefilerequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.DeleteFileResponse](../../models/operations/deletefileresponse.md)>** - - -## deleteModel - -Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.deleteModel({ - model: "ft:gpt-3.5-turbo:acemeco:suffix:abc123", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `request` | [operations.DeleteModelRequest](../../models/operations/deletemodelrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.DeleteModelResponse](../../models/operations/deletemodelresponse.md)>** - - -## downloadFile - -Returns the contents of the specified file. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.downloadFile({ - fileId: "Maserati Bronze Audi", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `request` | [operations.DownloadFileRequest](../../models/operations/downloadfilerequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.DownloadFileResponse](../../models/operations/downloadfileresponse.md)>** - - -## listFiles - -Returns a list of files that belong to the user's organization. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.listFiles(); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| - - -### Response - -**Promise<[operations.ListFilesResponse](../../models/operations/listfilesresponse.md)>** - - -## ~~listFineTuneEvents~~ - -Get fine-grained status updates for a fine-tune job. - - -> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.listFineTuneEvents({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `request` | [operations.ListFineTuneEventsRequest](../../models/operations/listfinetuneeventsrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.ListFineTuneEventsResponse](../../models/operations/listfinetuneeventsresponse.md)>** - - -## ~~listFineTunes~~ - -List your organization's fine-tuning jobs - - -> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.listFineTunes(); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.ListFineTunesResponse](../../models/operations/listfinetunesresponse.md)>** - - -## listFineTuningEvents - -Get status updates for a fine-tuning job. - - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.listFineTuningEvents({ - fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | -| `request` | [operations.ListFineTuningEventsRequest](../../models/operations/listfinetuningeventsrequest.md) | :heavy_check_mark: | The request object to use for the request. 
| -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.ListFineTuningEventsResponse](../../models/operations/listfinetuningeventsresponse.md)>** - - -## listModels - -Lists the currently available models, and provides basic information about each one such as the owner and availability. - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.listModels(); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| - - -### Response - -**Promise<[operations.ListModelsResponse](../../models/operations/listmodelsresponse.md)>** - - -## listPaginatedFineTuningJobs - -List your organization's fine-tuning jobs - - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.listPaginatedFineTuningJobs({}); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | -| `request` | [operations.ListPaginatedFineTuningJobsRequest](../../models/operations/listpaginatedfinetuningjobsrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.ListPaginatedFineTuningJobsResponse](../../models/operations/listpaginatedfinetuningjobsresponse.md)>** - - -## retrieveFile - -Returns information about a specific file. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.retrieveFile({ - fileId: "online Facilitator enfold", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `request` | [operations.RetrieveFileRequest](../../models/operations/retrievefilerequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.RetrieveFileResponse](../../models/operations/retrievefileresponse.md)>** - - -## ~~retrieveFineTune~~ - -Gets info about the fine-tune job. - -[Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - - -> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.retrieveFineTune({ - fineTuneId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `request` | [operations.RetrieveFineTuneRequest](../../models/operations/retrievefinetunerequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.RetrieveFineTuneResponse](../../models/operations/retrievefinetuneresponse.md)>** - - -## retrieveFineTuningJob - -Get info about a fine-tuning job. 
- -[Learn more about fine-tuning](/docs/guides/fine-tuning) - - -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.retrieveFineTuningJob({ - fineTuningJobId: "ft-AF1WoRqd3aJAHsqc9NY7iL8F", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `request` | [operations.RetrieveFineTuningJobRequest](../../models/operations/retrievefinetuningjobrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. | - - -### Response - -**Promise<[operations.RetrieveFineTuningJobResponse](../../models/operations/retrievefinetuningjobresponse.md)>** - - -## retrieveModel - -Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
- -### Example Usage - -```typescript -import { Gpt } from "@speakeasy-api/openai"; - -(async() => { - const sdk = new Gpt({ - security: { - apiKeyAuth: "", - }, - }); - - const res = await sdk.openAI.retrieveModel({ - model: "gpt-3.5-turbo", - }); - - if (res.statusCode == 200) { - // handle response - } -})(); -``` - -### Parameters - -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `request` | [operations.RetrieveModelRequest](../../models/operations/retrievemodelrequest.md) | :heavy_check_mark: | The request object to use for the request. | -| `config` | [AxiosRequestConfig](https://axios-http.com/docs/req_config) | :heavy_minus_sign: | Available config options for making requests. 
| - - -### Response - -**Promise<[operations.RetrieveModelResponse](../../models/operations/retrievemodelresponse.md)>** - diff --git a/files.gen b/files.gen index 51f43aa..1099008 100755 --- a/files.gen +++ b/files.gen @@ -1,4 +1,14 @@ -src/sdk/openai.ts +src/sdk/audio.ts +src/sdk/chat.ts +src/sdk/completions.ts +src/sdk/edits.ts +src/sdk/embeddings.ts +src/sdk/files.ts +src/sdk/finetunes.ts +src/sdk/finetuning.ts +src/sdk/images.ts +src/sdk/models.ts +src/sdk/moderations.ts src/sdk/sdk.ts .eslintrc.yml jest.config.js @@ -19,39 +29,39 @@ src/sdk/models/errors/sdkerror.ts src/sdk/types/index.ts src/sdk/types/rfcdate.ts tsconfig.json -src/sdk/models/operations/cancelfinetune.ts -src/sdk/models/operations/cancelfinetuningjob.ts +src/sdk/models/operations/createtranscription.ts +src/sdk/models/operations/createtranslation.ts src/sdk/models/operations/createchatcompletion.ts src/sdk/models/operations/createcompletion.ts src/sdk/models/operations/createedit.ts src/sdk/models/operations/createembedding.ts src/sdk/models/operations/createfile.ts -src/sdk/models/operations/createfinetune.ts -src/sdk/models/operations/createfinetuningjob.ts -src/sdk/models/operations/createimage.ts -src/sdk/models/operations/createimageedit.ts -src/sdk/models/operations/createimagevariation.ts -src/sdk/models/operations/createmoderation.ts -src/sdk/models/operations/createtranscription.ts -src/sdk/models/operations/createtranslation.ts src/sdk/models/operations/deletefile.ts -src/sdk/models/operations/deletemodel.ts src/sdk/models/operations/downloadfile.ts src/sdk/models/operations/listfiles.ts +src/sdk/models/operations/retrievefile.ts +src/sdk/models/operations/cancelfinetune.ts +src/sdk/models/operations/createfinetune.ts src/sdk/models/operations/listfinetuneevents.ts src/sdk/models/operations/listfinetunes.ts +src/sdk/models/operations/retrievefinetune.ts +src/sdk/models/operations/cancelfinetuningjob.ts +src/sdk/models/operations/createfinetuningjob.ts 
src/sdk/models/operations/listfinetuningevents.ts -src/sdk/models/operations/listmodels.ts src/sdk/models/operations/listpaginatedfinetuningjobs.ts -src/sdk/models/operations/retrievefile.ts -src/sdk/models/operations/retrievefinetune.ts src/sdk/models/operations/retrievefinetuningjob.ts +src/sdk/models/operations/createimage.ts +src/sdk/models/operations/createimageedit.ts +src/sdk/models/operations/createimagevariation.ts +src/sdk/models/operations/deletemodel.ts +src/sdk/models/operations/listmodels.ts src/sdk/models/operations/retrievemodel.ts +src/sdk/models/operations/createmoderation.ts src/sdk/models/operations/index.ts -src/sdk/models/shared/finetune.ts -src/sdk/models/shared/openaifile.ts -src/sdk/models/shared/finetuneevent.ts -src/sdk/models/shared/finetuningjob.ts +src/sdk/models/shared/createtranscriptionresponse.ts +src/sdk/models/shared/createtranscriptionrequest.ts +src/sdk/models/shared/createtranslationresponse.ts +src/sdk/models/shared/createtranslationrequest.ts src/sdk/models/shared/createchatcompletionresponse.ts src/sdk/models/shared/completionusage.ts src/sdk/models/shared/chatcompletionresponsemessage.ts @@ -64,78 +74,82 @@ src/sdk/models/shared/createeditresponse.ts src/sdk/models/shared/createeditrequest.ts src/sdk/models/shared/createembeddingresponse.ts src/sdk/models/shared/embedding.ts +src/sdk/models/shared/createembeddingrequest.ts +src/sdk/models/shared/openaifile.ts +src/sdk/models/shared/createfilerequest.ts +src/sdk/models/shared/deletefileresponse.ts +src/sdk/models/shared/listfilesresponse.ts +src/sdk/models/shared/finetune.ts +src/sdk/models/shared/finetuneevent.ts src/sdk/models/shared/createfinetunerequest.ts +src/sdk/models/shared/listfinetuneeventsresponse.ts +src/sdk/models/shared/listfinetunesresponse.ts +src/sdk/models/shared/finetuningjob.ts src/sdk/models/shared/createfinetuningjobrequest.ts +src/sdk/models/shared/listfinetuningjobeventsresponse.ts +src/sdk/models/shared/finetuningjobevent.ts 
+src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts src/sdk/models/shared/imagesresponse.ts src/sdk/models/shared/image.ts src/sdk/models/shared/createimagerequest.ts src/sdk/models/shared/createimageeditrequest.ts src/sdk/models/shared/createimagevariationrequest.ts -src/sdk/models/shared/createmoderationresponse.ts -src/sdk/models/shared/createmoderationrequest.ts -src/sdk/models/shared/createtranscriptionresponse.ts -src/sdk/models/shared/createtranslationresponse.ts -src/sdk/models/shared/deletefileresponse.ts src/sdk/models/shared/deletemodelresponse.ts -src/sdk/models/shared/listfilesresponse.ts -src/sdk/models/shared/listfinetuneeventsresponse.ts -src/sdk/models/shared/listfinetunesresponse.ts -src/sdk/models/shared/listfinetuningjobeventsresponse.ts -src/sdk/models/shared/finetuningjobevent.ts src/sdk/models/shared/listmodelsresponse.ts src/sdk/models/shared/model.ts -src/sdk/models/shared/listpaginatedfinetuningjobsresponse.ts +src/sdk/models/shared/createmoderationresponse.ts +src/sdk/models/shared/createmoderationrequest.ts src/sdk/models/shared/security.ts src/sdk/models/shared/index.ts src/sdk/models/errors/index.ts USAGE.md -docs/models/operations/cancelfinetunerequest.md -docs/models/operations/cancelfinetuneresponse.md -docs/models/operations/cancelfinetuningjobrequest.md -docs/models/operations/cancelfinetuningjobresponse.md +docs/models/operations/createtranscriptionresponse.md +docs/models/operations/createtranslationresponse.md docs/models/operations/createchatcompletionresponse.md docs/models/operations/createcompletionresponse.md docs/models/operations/createeditresponse.md docs/models/operations/createembeddingresponse.md docs/models/operations/createfileresponse.md -docs/models/operations/createfinetuneresponse.md -docs/models/operations/createfinetuningjobresponse.md -docs/models/operations/createimageresponse.md -docs/models/operations/createimageeditresponse.md -docs/models/operations/createimagevariationresponse.md 
-docs/models/operations/createmoderationresponse.md -docs/models/operations/createtranscriptionresponse.md -docs/models/operations/createtranslationresponse.md docs/models/operations/deletefilerequest.md docs/models/operations/deletefileresponse.md -docs/models/operations/deletemodelrequest.md -docs/models/operations/deletemodelresponse.md docs/models/operations/downloadfilerequest.md docs/models/operations/downloadfileresponse.md docs/models/operations/listfilesresponse.md +docs/models/operations/retrievefilerequest.md +docs/models/operations/retrievefileresponse.md +docs/models/operations/cancelfinetunerequest.md +docs/models/operations/cancelfinetuneresponse.md +docs/models/operations/createfinetuneresponse.md docs/models/operations/listfinetuneeventsrequest.md docs/models/operations/listfinetuneeventsresponse.md docs/models/operations/listfinetunesresponse.md +docs/models/operations/retrievefinetunerequest.md +docs/models/operations/retrievefinetuneresponse.md +docs/models/operations/cancelfinetuningjobrequest.md +docs/models/operations/cancelfinetuningjobresponse.md +docs/models/operations/createfinetuningjobresponse.md docs/models/operations/listfinetuningeventsrequest.md docs/models/operations/listfinetuningeventsresponse.md -docs/models/operations/listmodelsresponse.md docs/models/operations/listpaginatedfinetuningjobsrequest.md docs/models/operations/listpaginatedfinetuningjobsresponse.md -docs/models/operations/retrievefilerequest.md -docs/models/operations/retrievefileresponse.md -docs/models/operations/retrievefinetunerequest.md -docs/models/operations/retrievefinetuneresponse.md docs/models/operations/retrievefinetuningjobrequest.md docs/models/operations/retrievefinetuningjobresponse.md +docs/models/operations/createimageresponse.md +docs/models/operations/createimageeditresponse.md +docs/models/operations/createimagevariationresponse.md +docs/models/operations/deletemodelrequest.md +docs/models/operations/deletemodelresponse.md 
+docs/models/operations/listmodelsresponse.md docs/models/operations/retrievemodelrequest.md docs/models/operations/retrievemodelresponse.md -docs/models/shared/finetunehyperparams.md -docs/models/shared/finetune.md -docs/models/shared/openaifile.md -docs/models/shared/finetuneevent.md -docs/models/shared/finetuningjoberror.md -docs/models/shared/finetuningjobhyperparameters.md -docs/models/shared/finetuningjob.md +docs/models/operations/createmoderationresponse.md +docs/models/shared/createtranscriptionresponse.md +docs/models/shared/createtranscriptionrequestfile.md +docs/models/shared/createtranscriptionrequestresponseformat.md +docs/models/shared/createtranscriptionrequest.md +docs/models/shared/createtranslationresponse.md +docs/models/shared/createtranslationrequestfile.md +docs/models/shared/createtranslationrequest.md docs/models/shared/createchatcompletionresponsechoicesfinishreason.md docs/models/shared/createchatcompletionresponsechoices.md docs/models/shared/createchatcompletionresponse.md @@ -160,9 +174,28 @@ docs/models/shared/createeditrequest.md docs/models/shared/createembeddingresponseusage.md docs/models/shared/createembeddingresponse.md docs/models/shared/embedding.md +docs/models/shared/createembeddingrequest.md +docs/models/shared/openaifile.md +docs/models/shared/createfilerequestfile.md +docs/models/shared/createfilerequest.md +docs/models/shared/deletefileresponse.md +docs/models/shared/listfilesresponse.md +docs/models/shared/finetunehyperparams.md +docs/models/shared/finetune.md +docs/models/shared/finetuneevent.md +docs/models/shared/createfinetunerequesthyperparameters.md docs/models/shared/createfinetunerequest.md +docs/models/shared/listfinetuneeventsresponse.md +docs/models/shared/listfinetunesresponse.md +docs/models/shared/finetuningjoberror.md +docs/models/shared/finetuningjobhyperparameters.md +docs/models/shared/finetuningjob.md docs/models/shared/createfinetuningjobrequesthyperparameters.md 
docs/models/shared/createfinetuningjobrequest.md +docs/models/shared/listfinetuningjobeventsresponse.md +docs/models/shared/finetuningjobeventlevel.md +docs/models/shared/finetuningjobevent.md +docs/models/shared/listpaginatedfinetuningjobsresponse.md docs/models/shared/imagesresponse.md docs/models/shared/image.md docs/models/shared/createimagerequestresponseformat.md @@ -177,25 +210,25 @@ docs/models/shared/createimagevariationrequestimage.md docs/models/shared/createimagevariationrequestresponseformat.md docs/models/shared/createimagevariationrequestsize.md docs/models/shared/createimagevariationrequest.md +docs/models/shared/deletemodelresponse.md +docs/models/shared/listmodelsresponse.md +docs/models/shared/model.md docs/models/shared/createmoderationresponseresultscategories.md docs/models/shared/createmoderationresponseresultscategoryscores.md docs/models/shared/createmoderationresponseresults.md docs/models/shared/createmoderationresponse.md docs/models/shared/createmoderationrequest.md -docs/models/shared/createtranscriptionresponse.md -docs/models/shared/createtranslationresponse.md -docs/models/shared/deletefileresponse.md -docs/models/shared/deletemodelresponse.md -docs/models/shared/listfilesresponse.md -docs/models/shared/listfinetuneeventsresponse.md -docs/models/shared/listfinetunesresponse.md -docs/models/shared/listfinetuningjobeventsresponse.md -docs/models/shared/finetuningjobeventlevel.md -docs/models/shared/finetuningjobevent.md -docs/models/shared/listmodelsresponse.md -docs/models/shared/model.md -docs/models/shared/listpaginatedfinetuningjobsresponse.md docs/models/shared/security.md docs/sdks/gpt/README.md -docs/sdks/openai/README.md +docs/sdks/audio/README.md +docs/sdks/chat/README.md +docs/sdks/completions/README.md +docs/sdks/edits/README.md +docs/sdks/embeddings/README.md +docs/sdks/files/README.md +docs/sdks/finetunes/README.md +docs/sdks/finetuning/README.md +docs/sdks/images/README.md +docs/sdks/models/README.md 
+docs/sdks/moderations/README.md .gitattributes \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 96508f8..1c5aef2 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: 11e459ef36cd22c19855de8f048393af + docChecksum: d8810f26858855d4c1e35d5ef57d39ad docVersion: 2.0.0 - speakeasyVersion: 1.96.1 - generationVersion: 2.150.0 + speakeasyVersion: 1.97.1 + generationVersion: 2.152.1 generation: sdkClassName: gpt sdkFlattening: true @@ -16,7 +16,7 @@ features: globalSecurity: 2.82.0 globalServerURLs: 2.82.0 typescript: - version: 2.26.0 + version: 2.26.1 author: speakeasy-openai flattenGlobalSecurity: false maxMethodParams: 0 diff --git a/package-lock.json b/package-lock.json index b4a96b4..4105cea 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.26.0", + "version": "2.26.1", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.26.0", + "version": "2.26.1", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 7fb2eb8..793b233 100755 --- a/package.json +++ b/package.json @@ -1,9 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.26.0", + "version": "2.26.1", "author": "speakeasy-openai", "scripts": { - "prepare": "tsc --build" + "prepare": "tsc --build", + "check:tsc": "tsc --noEmit --skipLibCheck", + "check:eslint": "eslint --max-warnings=0 src", + "check": "npm run check:tsc && npm run check:eslint" }, "dependencies": { "axios": "^1.1.3", diff --git a/src/sdk/audio.ts b/src/sdk/audio.ts new file mode 100755 index 0000000..5333ff8 --- /dev/null +++ b/src/sdk/audio.ts @@ -0,0 +1,199 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
+ */ + +import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; +import * as operations from "./models/operations"; +import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; + +/** + * Learn how to turn audio into text. + */ + +export class Audio { + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; + } + + /** + * Transcribes audio into the input language. + */ + async createTranscription( + req: shared.CreateTranscriptionRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateTranscriptionRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/audio/transcriptions"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = 
this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateTranscriptionResponse = + new operations.CreateTranscriptionResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createTranscriptionResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.CreateTranscriptionResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Translates audio into English. 
+ */ + async createTranslation( + req: shared.CreateTranslationRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateTranslationRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/audio/translations"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateTranslationResponse = new operations.CreateTranslationResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createTranslationResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.CreateTranslationResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } +} diff --git a/src/sdk/chat.ts b/src/sdk/chat.ts new file mode 100755 index 0000000..4e8fc0c --- /dev/null +++ b/src/sdk/chat.ts @@ -0,0 +1,111 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; +import * as operations from "./models/operations"; +import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; + +/** + * Given a list of messages comprising a conversation, the model will return a response. + */ + +export class Chat { + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; + } + + /** + * Creates a model response for the given chat conversation. 
+ */ + async createChatCompletion( + req: shared.CreateChatCompletionRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateChatCompletionRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/chat/completions"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateChatCompletionResponse = + new operations.CreateChatCompletionResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createChatCompletionResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.CreateChatCompletionResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } +} diff --git a/src/sdk/completions.ts b/src/sdk/completions.ts new file mode 100755 index 0000000..821b113 --- /dev/null +++ b/src/sdk/completions.ts @@ -0,0 +1,110 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; +import * as operations from "./models/operations"; +import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; + +/** + * Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. + */ + +export class Completions { + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; + } + + /** + * Creates a completion for the provided prompt and parameters. 
+ */ + async createCompletion( + req: shared.CreateCompletionRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateCompletionRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/completions"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateCompletionResponse = new operations.CreateCompletionResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createCompletionResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.CreateCompletionResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } +} diff --git a/src/sdk/edits.ts b/src/sdk/edits.ts new file mode 100755 index 0000000..e4fdade --- /dev/null +++ b/src/sdk/edits.ts @@ -0,0 +1,112 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; +import * as operations from "./models/operations"; +import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; + +/** + * Given a prompt and an instruction, the model will return an edited version of the prompt. + */ + +export class Edits { + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; + } + + /** + * Creates a new edit for the provided input, instruction, and parameters. + * + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. 
+ */ + async createEdit( + req: shared.CreateEditRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateEditRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/edits"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateEditResponse = new operations.CreateEditResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createEditResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.CreateEditResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } +} diff --git a/src/sdk/embeddings.ts b/src/sdk/embeddings.ts new file mode 100755 index 0000000..254105a --- /dev/null +++ b/src/sdk/embeddings.ts @@ -0,0 +1,110 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; +import * as operations from "./models/operations"; +import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; + +/** + * Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. + */ + +export class Embeddings { + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; + } + + /** + * Creates an embedding vector representing the input text. 
+ */ + async createEmbedding( + req: shared.CreateEmbeddingRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateEmbeddingRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/embeddings"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateEmbeddingResponse = new operations.CreateEmbeddingResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createEmbeddingResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.CreateEmbeddingResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } +} diff --git a/src/sdk/files.ts b/src/sdk/files.ts new file mode 100755 index 0000000..6bdbc10 --- /dev/null +++ b/src/sdk/files.ts @@ -0,0 +1,383 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; +import * as operations from "./models/operations"; +import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; + +/** + * Files are used to upload documents that can be used with features like fine-tuning. + */ + +export class Files { + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; + } + + /** + * Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. 
+ * + */ + async createFile( + req: shared.CreateFileRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateFileRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/files"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateFileResponse = new operations.CreateFileResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.openAIFile = utils.objectToClass(JSON.parse(decodedRes), shared.OpenAIFile); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Delete a file. + */ + async deleteFile( + req: operations.DeleteFileRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.DeleteFileRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "delete", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = 
httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.DeleteFileResponse = new operations.DeleteFileResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.deleteFileResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.DeleteFileResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Returns the contents of the specified file. + */ + async downloadFile( + req: operations.DownloadFileRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.DownloadFileRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/files/{file_id}/content", req); + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + 
responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.DownloadFileResponse = new operations.DownloadFileResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.downloadFile200ApplicationJSONString = decodedRes; + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Returns a list of files that belong to the user's organization. + */ + async listFiles(config?: AxiosRequestConfig): Promise { + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/files"; + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListFilesResponse = new operations.ListFilesResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listFilesResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ListFilesResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Returns information about a specific file. + */ + async retrieveFile( + req: operations.RetrieveFileRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.RetrieveFileRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const 
contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.RetrieveFileResponse = new operations.RetrieveFileResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.openAIFile = utils.objectToClass(JSON.parse(decodedRes), shared.OpenAIFile); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } +} diff --git a/src/sdk/finetunes.ts b/src/sdk/finetunes.ts new file mode 100755 index 0000000..22bcb44 --- /dev/null +++ b/src/sdk/finetunes.ts @@ -0,0 +1,405 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; +import * as operations from "./models/operations"; +import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; + +/** + * Manage legacy fine-tuning jobs to tailor a model to your specific training data. + */ + +export class FineTunes { + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; + } + + /** + * Immediately cancel a fine-tune job. + * + * + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. 
+ */ + async cancelFineTune( + req: operations.CancelFineTuneRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.CancelFineTuneRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}/cancel", req); + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CancelFineTuneResponse = new operations.CancelFineTuneResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Creates a job that fine-tunes a specified model from a given dataset. + * + * Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + * + * [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + * + * + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. 
+ */ + async createFineTune( + req: shared.CreateFineTuneRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateFineTuneRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/fine-tunes"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateFineTuneResponse = new operations.CreateFineTuneResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Get fine-grained status updates for a fine-tune job. + * + * + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. + */ + async listFineTuneEvents( + req: operations.ListFineTuneEventsRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.ListFineTuneEventsRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}/events", req); + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + const queryParams: string = utils.serializeQueryParams(req); + headers["Accept"] = "application/json"; + + headers["user-agent"] = 
this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url + queryParams, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListFineTuneEventsResponse = + new operations.ListFineTuneEventsResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listFineTuneEventsResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ListFineTuneEventsResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * List your organization's fine-tuning jobs + * + * + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. 
+ */ + async listFineTunes(config?: AxiosRequestConfig): Promise { + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/fine-tunes"; + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListFineTunesResponse = new operations.ListFineTunesResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listFineTunesResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ListFineTunesResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Gets info about the fine-tune job. 
+ * + * [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + * + * + * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. + */ + async retrieveFineTune( + req: operations.RetrieveFineTuneRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.RetrieveFineTuneRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}", req); + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.RetrieveFineTuneResponse = new operations.RetrieveFineTuneResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } +} diff --git a/src/sdk/finetuning.ts b/src/sdk/finetuning.ts new file mode 100755 index 0000000..f5edefb --- /dev/null +++ b/src/sdk/finetuning.ts @@ -0,0 +1,428 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; +import * as operations from "./models/operations"; +import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; + +/** + * Manage fine-tuning jobs to tailor a model to your specific training data. + */ + +export class FineTuning { + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; + } + + /** + * Immediately cancel a fine-tune job. 
+ * + */ + async cancelFineTuningJob( + req: operations.CancelFineTuningJobRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.CancelFineTuningJobRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL( + baseURL, + "/fine_tuning/jobs/{fine_tuning_job_id}/cancel", + req + ); + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CancelFineTuningJobResponse = + new operations.CancelFineTuningJobResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTuningJob = utils.objectToClass( + JSON.parse(decodedRes), + shared.FineTuningJob + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Creates a job that fine-tunes a specified model from a given dataset. + * + * Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + * + * [Learn more about fine-tuning](/docs/guides/fine-tuning) + * + */ + async createFineTuningJob( + req: shared.CreateFineTuningJobRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateFineTuningJobRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/fine_tuning/jobs"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof 
utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateFineTuningJobResponse = + new operations.CreateFineTuningJobResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTuningJob = utils.objectToClass( + JSON.parse(decodedRes), + shared.FineTuningJob + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Get status updates for a fine-tuning job. 
+ * + */ + async listFineTuningEvents( + req: operations.ListFineTuningEventsRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.ListFineTuningEventsRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL( + baseURL, + "/fine_tuning/jobs/{fine_tuning_job_id}/events", + req + ); + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + const queryParams: string = utils.serializeQueryParams(req); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url + queryParams, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListFineTuningEventsResponse = + new operations.ListFineTuningEventsResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listFineTuningJobEventsResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ListFineTuningJobEventsResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * List your organization's fine-tuning jobs + * + */ + async listPaginatedFineTuningJobs( + req: operations.ListPaginatedFineTuningJobsRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.ListPaginatedFineTuningJobsRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/fine_tuning/jobs"; + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + const queryParams: string = utils.serializeQueryParams(req); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await 
client.request({ + validateStatus: () => true, + url: url + queryParams, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListPaginatedFineTuningJobsResponse = + new operations.ListPaginatedFineTuningJobsResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listPaginatedFineTuningJobsResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ListPaginatedFineTuningJobsResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Get info about a fine-tuning job. 
+ * + * [Learn more about fine-tuning](/docs/guides/fine-tuning) + * + */ + async retrieveFineTuningJob( + req: operations.RetrieveFineTuningJobRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.RetrieveFineTuningJobRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL( + baseURL, + "/fine_tuning/jobs/{fine_tuning_job_id}", + req + ); + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.RetrieveFineTuningJobResponse = + new operations.RetrieveFineTuningJobResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.fineTuningJob = utils.objectToClass( + JSON.parse(decodedRes), + shared.FineTuningJob + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } +} diff --git a/src/sdk/images.ts b/src/sdk/images.ts new file mode 100755 index 0000000..e7e6541 --- /dev/null +++ b/src/sdk/images.ts @@ -0,0 +1,287 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; +import * as operations from "./models/operations"; +import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; + +/** + * Given a prompt and/or an input image, the model will generate a new image. + */ + +export class Images { + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; + } + + /** + * Creates an image given a prompt. 
+ */ + async createImage( + req: shared.CreateImageRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateImageRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/images/generations"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateImageResponse = new operations.CreateImageResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.imagesResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ImagesResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Creates an edited or extended image given an original image and a prompt. + */ + async createImageEdit( + req: shared.CreateImageEditRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateImageEditRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/images/edits"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + 
...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateImageEditResponse = new operations.CreateImageEditResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.imagesResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ImagesResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Creates a variation of a given image. 
+ */ + async createImageVariation( + req: shared.CreateImageVariationRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateImageVariationRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/images/variations"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateImageVariationResponse = + new operations.CreateImageVariationResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.imagesResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ImagesResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } +} diff --git a/src/sdk/models.ts b/src/sdk/models.ts new file mode 100755 index 0000000..1bb8ea0 --- /dev/null +++ b/src/sdk/models.ts @@ -0,0 +1,228 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; +import * as operations from "./models/operations"; +import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; + +/** + * List and describe the various models available in the API. + */ + +export class Models { + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; + } + + /** + * Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. 
+ */ + async deleteModel( + req: operations.DeleteModelRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.DeleteModelRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/models/{model}", req); + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "delete", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.DeleteModelResponse = new operations.DeleteModelResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.deleteModelResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.DeleteModelResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Lists the currently available models, and provides basic information about each one such as the owner and availability. + */ + async listModels(config?: AxiosRequestConfig): Promise { + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/models"; + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.ListModelsResponse = new operations.ListModelsResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.listModelsResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.ListModelsResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } + + /** + * Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + */ + async retrieveModel( + req: operations.RetrieveModelRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new operations.RetrieveModelRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = utils.generateURL(baseURL, "/models/{model}", req); + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "get", + headers: 
headers, + responseType: "arraybuffer", + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.RetrieveModelResponse = new operations.RetrieveModelResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.model = utils.objectToClass(JSON.parse(decodedRes), shared.Model); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } +} diff --git a/src/sdk/models/shared/createembeddingrequest.ts b/src/sdk/models/shared/createembeddingrequest.ts new file mode 100755 index 0000000..4b49204 --- /dev/null +++ b/src/sdk/models/shared/createembeddingrequest.ts @@ -0,0 +1,38 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; +import { Expose } from "class-transformer"; + +export class CreateEmbeddingRequest extends SpeakeasyBase { + /** + * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "input" }) + input: any; + + /** + * ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "model" }) + model: any; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + * + * @remarks + * + */ + @SpeakeasyMetadata() + @Expose({ name: "user" }) + user?: string; +} diff --git a/src/sdk/models/shared/createfilerequest.ts b/src/sdk/models/shared/createfilerequest.ts new file mode 100755 index 0000000..eb2eec1 --- /dev/null +++ b/src/sdk/models/shared/createfilerequest.ts @@ -0,0 +1,37 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; + +export class CreateFileRequestFile extends SpeakeasyBase { + @SpeakeasyMetadata({ data: "multipart_form, content=true" }) + content: Uint8Array; + + @SpeakeasyMetadata({ data: "multipart_form, name=file" }) + file: string; +} + +export class CreateFileRequest extends SpeakeasyBase { + /** + * The file object (not file name) to be uploaded. + * + * @remarks + * + * If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. + * + */ + @SpeakeasyMetadata({ data: "multipart_form, file=true" }) + file: CreateFileRequestFile; + + /** + * The intended purpose of the uploaded file. + * + * @remarks + * + * Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file is correct for fine-tuning. 
+ * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=purpose" }) + purpose: string; +} diff --git a/src/sdk/models/shared/createfinetunerequest.ts b/src/sdk/models/shared/createfinetunerequest.ts index 8a96f33..f3b9d0c 100755 --- a/src/sdk/models/shared/createfinetunerequest.ts +++ b/src/sdk/models/shared/createfinetunerequest.ts @@ -3,7 +3,23 @@ */ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; -import { Expose } from "class-transformer"; +import { Expose, Type } from "class-transformer"; + +/** + * The hyperparameters used for the fine-tuning job. + */ +export class CreateFineTuneRequestHyperparameters extends SpeakeasyBase { + /** + * The number of epochs to train the model for. An epoch refers to one + * + * @remarks + * full cycle through the training dataset. + * + */ + @SpeakeasyMetadata() + @Expose({ name: "n_epochs" }) + nEpochs?: any; +} export class CreateFineTuneRequest extends SpeakeasyBase { /** @@ -81,6 +97,14 @@ export class CreateFineTuneRequest extends SpeakeasyBase { @Expose({ name: "compute_classification_metrics" }) computeClassificationMetrics?: boolean; + /** + * The hyperparameters used for the fine-tuning job. + */ + @SpeakeasyMetadata() + @Expose({ name: "hyperparameters" }) + @Type(() => CreateFineTuneRequestHyperparameters) + hyperparameters?: CreateFineTuneRequestHyperparameters; + /** * The learning rate multiplier to use for training. * @@ -112,17 +136,6 @@ export class CreateFineTuneRequest extends SpeakeasyBase { @Expose({ name: "model" }) model?: any; - /** - * The number of epochs to train the model for. An epoch refers to one - * - * @remarks - * full cycle through the training dataset. - * - */ - @SpeakeasyMetadata() - @Expose({ name: "n_epochs" }) - nEpochs?: number; - /** * The weight to use for loss on the prompt tokens. 
This controls how * diff --git a/src/sdk/models/shared/createtranscriptionrequest.ts b/src/sdk/models/shared/createtranscriptionrequest.ts new file mode 100755 index 0000000..c547668 --- /dev/null +++ b/src/sdk/models/shared/createtranscriptionrequest.ts @@ -0,0 +1,83 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; + +export class CreateTranscriptionRequestFile extends SpeakeasyBase { + @SpeakeasyMetadata({ data: "multipart_form, content=true" }) + content: Uint8Array; + + @SpeakeasyMetadata({ data: "multipart_form, name=file" }) + file: string; +} + +/** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + * + * @remarks + * + */ +export enum CreateTranscriptionRequestResponseFormat { + Json = "json", + Text = "text", + Srt = "srt", + VerboseJson = "verbose_json", + Vtt = "vtt", +} + +export class CreateTranscriptionRequest extends SpeakeasyBase { + /** + * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, file=true" }) + file: CreateTranscriptionRequestFile; + + /** + * The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=language" }) + language?: string; + + /** + * ID of the model to use. Only `whisper-1` is currently available. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=model;json=true" }) + model: any; + + /** + * An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. 
+ * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=prompt" }) + prompt?: string; + + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) + responseFormat?: CreateTranscriptionRequestResponseFormat; + + /** + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=temperature" }) + temperature?: number; +} diff --git a/src/sdk/models/shared/createtranslationrequest.ts b/src/sdk/models/shared/createtranslationrequest.ts new file mode 100755 index 0000000..4c95f6e --- /dev/null +++ b/src/sdk/models/shared/createtranslationrequest.ts @@ -0,0 +1,60 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; + +export class CreateTranslationRequestFile extends SpeakeasyBase { + @SpeakeasyMetadata({ data: "multipart_form, content=true" }) + content: Uint8Array; + + @SpeakeasyMetadata({ data: "multipart_form, name=file" }) + file: string; +} + +export class CreateTranslationRequest extends SpeakeasyBase { + /** + * The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, file=true" }) + file: CreateTranslationRequestFile; + + /** + * ID of the model to use. Only `whisper-1` is currently available. 
+ * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=model;json=true" }) + model: any; + + /** + * An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=prompt" }) + prompt?: string; + + /** + * The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=response_format" }) + responseFormat?: string; + + /** + * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + * + * @remarks + * + */ + @SpeakeasyMetadata({ data: "multipart_form, name=temperature" }) + temperature?: number; +} diff --git a/src/sdk/models/shared/index.ts b/src/sdk/models/shared/index.ts index 5f70c40..7fff7c8 100755 --- a/src/sdk/models/shared/index.ts +++ b/src/sdk/models/shared/index.ts @@ -12,7 +12,9 @@ export * from "./createcompletionrequest"; export * from "./createcompletionresponse"; export * from "./createeditrequest"; export * from "./createeditresponse"; +export * from "./createembeddingrequest"; export * from "./createembeddingresponse"; +export * from "./createfilerequest"; export * from "./createfinetunerequest"; export * from "./createfinetuningjobrequest"; export * from "./createimageeditrequest"; @@ -20,7 +22,9 @@ export * from "./createimagerequest"; export * from "./createimagevariationrequest"; export * from "./createmoderationrequest"; export * from "./createmoderationresponse"; +export * from "./createtranscriptionrequest"; export * from "./createtranscriptionresponse"; 
+export * from "./createtranslationrequest"; export * from "./createtranslationresponse"; export * from "./deletefileresponse"; export * from "./deletemodelresponse"; diff --git a/src/sdk/moderations.ts b/src/sdk/moderations.ts new file mode 100755 index 0000000..bc147b1 --- /dev/null +++ b/src/sdk/moderations.ts @@ -0,0 +1,110 @@ +/* + * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. + */ + +import * as utils from "../internal/utils"; +import * as errors from "./models/errors"; +import * as operations from "./models/operations"; +import * as shared from "./models/shared"; +import { SDKConfiguration } from "./sdk"; +import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; + +/** + * Given a input text, outputs if the model classifies it as violating OpenAI's content policy. + */ + +export class Moderations { + private sdkConfiguration: SDKConfiguration; + + constructor(sdkConfig: SDKConfiguration) { + this.sdkConfiguration = sdkConfig; + } + + /** + * Classifies if text violates OpenAI's Content Policy + */ + async createModeration( + req: shared.CreateModerationRequest, + config?: AxiosRequestConfig + ): Promise { + if (!(req instanceof utils.SpeakeasyBase)) { + req = new shared.CreateModerationRequest(req); + } + + const baseURL: string = utils.templateUrl( + this.sdkConfiguration.serverURL, + this.sdkConfiguration.serverDefaults + ); + const url: string = baseURL.replace(/\/$/, "") + "/moderations"; + + let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; + + try { + [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); + } catch (e: unknown) { + if (e instanceof Error) { + throw new Error(`Error serializing request body, cause: ${e.message}`); + } + } + const client: AxiosInstance = this.sdkConfiguration.defaultClient; + let globalSecurity = this.sdkConfiguration.security; + if (typeof globalSecurity === "function") { + globalSecurity = await globalSecurity(); + } + 
if (!(globalSecurity instanceof utils.SpeakeasyBase)) { + globalSecurity = new shared.Security(globalSecurity); + } + const properties = utils.parseSecurityProperties(globalSecurity); + const headers: RawAxiosRequestHeaders = { + ...reqBodyHeaders, + ...config?.headers, + ...properties.headers, + }; + if (reqBody == null) throw new Error("request body is required"); + headers["Accept"] = "application/json"; + + headers["user-agent"] = this.sdkConfiguration.userAgent; + + const httpRes: AxiosResponse = await client.request({ + validateStatus: () => true, + url: url, + method: "post", + headers: headers, + responseType: "arraybuffer", + data: reqBody, + ...config, + }); + + const contentType: string = httpRes?.headers?.["content-type"] ?? ""; + + if (httpRes?.status == null) { + throw new Error(`status code not found in response: ${httpRes}`); + } + + const res: operations.CreateModerationResponse = new operations.CreateModerationResponse({ + statusCode: httpRes.status, + contentType: contentType, + rawResponse: httpRes, + }); + const decodedRes = new TextDecoder().decode(httpRes?.data); + switch (true) { + case httpRes?.status == 200: + if (utils.matchContentType(contentType, `application/json`)) { + res.createModerationResponse = utils.objectToClass( + JSON.parse(decodedRes), + shared.CreateModerationResponse + ); + } else { + throw new errors.SDKError( + "unknown content-type received: " + contentType, + httpRes.status, + decodedRes, + httpRes + ); + } + break; + } + + return res; + } +} diff --git a/src/sdk/openai.ts b/src/sdk/openai.ts deleted file mode 100755 index 452808f..0000000 --- a/src/sdk/openai.ts +++ /dev/null @@ -1,2247 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT. 
- */ - -import * as utils from "../internal/utils"; -import * as errors from "./models/errors"; -import * as operations from "./models/operations"; -import * as shared from "./models/shared"; -import { SDKConfiguration } from "./sdk"; -import { AxiosInstance, AxiosRequestConfig, AxiosResponse, RawAxiosRequestHeaders } from "axios"; - -/** - * The OpenAI REST API - */ - -export class OpenAI { - private sdkConfiguration: SDKConfiguration; - - constructor(sdkConfig: SDKConfiguration) { - this.sdkConfiguration = sdkConfig; - } - - /** - * Immediately cancel a fine-tune job. - * - * - * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. - */ - async cancelFineTune( - req: operations.CancelFineTuneRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.CancelFineTuneRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}/cancel", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CancelFineTuneResponse = new operations.CancelFineTuneResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Immediately cancel a fine-tune job. - * - */ - async cancelFineTuningJob( - req: operations.CancelFineTuningJobRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.CancelFineTuningJobRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL( - baseURL, - "/fine_tuning/jobs/{fine_tuning_job_id}/cancel", - req - ); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: 
"arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CancelFineTuningJobResponse = - new operations.CancelFineTuningJobResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.fineTuningJob = utils.objectToClass( - JSON.parse(decodedRes), - shared.FineTuningJob - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Creates a model response for the given chat conversation. - */ - async createChatCompletion( - req: shared.CreateChatCompletionRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateChatCompletionRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/chat/completions"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = 
utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - ...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateChatCompletionResponse = - new operations.CreateChatCompletionResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createChatCompletionResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.CreateChatCompletionResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Creates a completion for the provided prompt and parameters. 
- */ - async createCompletion( - req: shared.CreateCompletionRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateCompletionRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/completions"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - ...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateCompletionResponse = new operations.CreateCompletionResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createCompletionResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.CreateCompletionResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Creates a new edit for the provided input, instruction, and parameters. - * - * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. - */ - async createEdit( - req: shared.CreateEditRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateEditRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/edits"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = 
utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - ...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateEditResponse = new operations.CreateEditResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createEditResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.CreateEditResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Creates an embedding vector representing the input text. 
- */ - async createEmbedding( - req: Record, - config?: AxiosRequestConfig - ): Promise { - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/embeddings"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - ...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateEmbeddingResponse = new operations.CreateEmbeddingResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createEmbeddingResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.CreateEmbeddingResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. 
- * - */ - async createFile( - req: Record, - config?: AxiosRequestConfig - ): Promise { - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/files"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - ...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateFileResponse = new operations.CreateFileResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.openAIFile = utils.objectToClass(JSON.parse(decodedRes), shared.OpenAIFile); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Creates a job that fine-tunes a specified model from a given dataset. - * - * Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - * - * [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - * - * - * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. 
- */ - async createFineTune( - req: shared.CreateFineTuneRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateFineTuneRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/fine-tunes"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - ...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateFineTuneResponse = new operations.CreateFineTuneResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Creates a job that fine-tunes a specified model from a given dataset. - * - * Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - * - * [Learn more about fine-tuning](/docs/guides/fine-tuning) - * - */ - async createFineTuningJob( - req: shared.CreateFineTuningJobRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateFineTuningJobRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/fine_tuning/jobs"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - 
globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - ...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateFineTuningJobResponse = - new operations.CreateFineTuningJobResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.fineTuningJob = utils.objectToClass( - JSON.parse(decodedRes), - shared.FineTuningJob - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Creates an image given a prompt. 
- */ - async createImage( - req: shared.CreateImageRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateImageRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/images/generations"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - ...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateImageResponse = new operations.CreateImageResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.imagesResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.ImagesResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Creates an edited or extended image given an original image and a prompt. - */ - async createImageEdit( - req: shared.CreateImageEditRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateImageEditRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/images/edits"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - 
...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateImageEditResponse = new operations.CreateImageEditResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.imagesResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.ImagesResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Creates a variation of a given image. 
- */ - async createImageVariation( - req: shared.CreateImageVariationRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateImageVariationRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/images/variations"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - ...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateImageVariationResponse = - new operations.CreateImageVariationResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.imagesResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.ImagesResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Classifies if text violates OpenAI's Content Policy - */ - async createModeration( - req: shared.CreateModerationRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new shared.CreateModerationRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/moderations"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "json"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - 
...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateModerationResponse = new operations.CreateModerationResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createModerationResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.CreateModerationResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Transcribes audio into the input language. 
- */ - async createTranscription( - req: Record, - config?: AxiosRequestConfig - ): Promise { - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/audio/transcriptions"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - ...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateTranscriptionResponse = - new operations.CreateTranscriptionResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createTranscriptionResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.CreateTranscriptionResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Translates audio into English. - */ - async createTranslation( - req: Record, - config?: AxiosRequestConfig - ): Promise { - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/audio/translations"; - - let [reqBodyHeaders, reqBody]: [object, any] = [{}, null]; - - try { - [reqBodyHeaders, reqBody] = utils.serializeRequestBody(req, "request", "multipart"); - } catch (e: unknown) { - if (e instanceof Error) { - throw new Error(`Error serializing request body, cause: ${e.message}`); - } - } - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { - ...reqBodyHeaders, - ...config?.headers, - ...properties.headers, - }; - if (reqBody == null) throw new Error("request body is required"); - 
headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "post", - headers: headers, - responseType: "arraybuffer", - data: reqBody, - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.CreateTranslationResponse = new operations.CreateTranslationResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.createTranslationResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.CreateTranslationResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Delete a file. 
- */ - async deleteFile( - req: operations.DeleteFileRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.DeleteFileRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "delete", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.DeleteFileResponse = new operations.DeleteFileResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.deleteFileResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.DeleteFileResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. - */ - async deleteModel( - req: operations.DeleteModelRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.DeleteModelRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL(baseURL, "/models/{model}", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "delete", - headers: headers, - 
responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.DeleteModelResponse = new operations.DeleteModelResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.deleteModelResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.DeleteModelResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Returns the contents of the specified file. - */ - async downloadFile( - req: operations.DownloadFileRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.DownloadFileRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL(baseURL, "/files/{file_id}/content", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - 
validateStatus: () => true, - url: url, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.DownloadFileResponse = new operations.DownloadFileResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.downloadFile200ApplicationJSONString = decodedRes; - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Returns a list of files that belong to the user's organization. - */ - async listFiles(config?: AxiosRequestConfig): Promise { - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/files"; - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - 
- const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.ListFilesResponse = new operations.ListFilesResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listFilesResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.ListFilesResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Get fine-grained status updates for a fine-tune job. - * - * - * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. - */ - async listFineTuneEvents( - req: operations.ListFineTuneEventsRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.ListFineTuneEventsRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}/events", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - const queryParams: string = utils.serializeQueryParams(req); - 
headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url + queryParams, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.ListFineTuneEventsResponse = - new operations.ListFineTuneEventsResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listFineTuneEventsResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.ListFineTuneEventsResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * List your organization's fine-tuning jobs - * - * - * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. 
- */ - async listFineTunes(config?: AxiosRequestConfig): Promise { - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/fine-tunes"; - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.ListFineTunesResponse = new operations.ListFineTunesResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listFineTunesResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.ListFineTunesResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Get status updates for a fine-tuning job. 
- * - */ - async listFineTuningEvents( - req: operations.ListFineTuningEventsRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.ListFineTuningEventsRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL( - baseURL, - "/fine_tuning/jobs/{fine_tuning_job_id}/events", - req - ); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - const queryParams: string = utils.serializeQueryParams(req); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url + queryParams, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.ListFineTuningEventsResponse = - new operations.ListFineTuningEventsResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listFineTuningJobEventsResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.ListFineTuningJobEventsResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Lists the currently available models, and provides basic information about each one such as the owner and availability. - */ - async listModels(config?: AxiosRequestConfig): Promise { - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/models"; - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = 
httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.ListModelsResponse = new operations.ListModelsResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listModelsResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.ListModelsResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * List your organization's fine-tuning jobs - * - */ - async listPaginatedFineTuningJobs( - req: operations.ListPaginatedFineTuningJobsRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.ListPaginatedFineTuningJobsRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = baseURL.replace(/\/$/, "") + "/fine_tuning/jobs"; - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - const queryParams: string = utils.serializeQueryParams(req); - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await 
client.request({ - validateStatus: () => true, - url: url + queryParams, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.ListPaginatedFineTuningJobsResponse = - new operations.ListPaginatedFineTuningJobsResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.listPaginatedFineTuningJobsResponse = utils.objectToClass( - JSON.parse(decodedRes), - shared.ListPaginatedFineTuningJobsResponse - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Returns information about a specific file. 
- */ - async retrieveFile( - req: operations.RetrieveFileRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.RetrieveFileRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL(baseURL, "/files/{file_id}", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.RetrieveFileResponse = new operations.RetrieveFileResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.openAIFile = utils.objectToClass(JSON.parse(decodedRes), shared.OpenAIFile); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Gets info about the fine-tune job. - * - * [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) - * - * - * @deprecated method: This will be removed in a future release, please migrate away from it as soon as possible. - */ - async retrieveFineTune( - req: operations.RetrieveFineTuneRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.RetrieveFineTuneRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL(baseURL, "/fine-tunes/{fine_tune_id}", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - 
const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.RetrieveFineTuneResponse = new operations.RetrieveFineTuneResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.fineTune = utils.objectToClass(JSON.parse(decodedRes), shared.FineTune); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Get info about a fine-tuning job. 
- * - * [Learn more about fine-tuning](/docs/guides/fine-tuning) - * - */ - async retrieveFineTuningJob( - req: operations.RetrieveFineTuningJobRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.RetrieveFineTuningJobRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL( - baseURL, - "/fine_tuning/jobs/{fine_tuning_job_id}", - req - ); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: "get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? 
""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.RetrieveFineTuningJobResponse = - new operations.RetrieveFineTuningJobResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.fineTuningJob = utils.objectToClass( - JSON.parse(decodedRes), - shared.FineTuningJob - ); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } - - /** - * Retrieves a model instance, providing basic information about the model such as the owner and permissioning. - */ - async retrieveModel( - req: operations.RetrieveModelRequest, - config?: AxiosRequestConfig - ): Promise { - if (!(req instanceof utils.SpeakeasyBase)) { - req = new operations.RetrieveModelRequest(req); - } - - const baseURL: string = utils.templateUrl( - this.sdkConfiguration.serverURL, - this.sdkConfiguration.serverDefaults - ); - const url: string = utils.generateURL(baseURL, "/models/{model}", req); - const client: AxiosInstance = this.sdkConfiguration.defaultClient; - let globalSecurity = this.sdkConfiguration.security; - if (typeof globalSecurity === "function") { - globalSecurity = await globalSecurity(); - } - if (!(globalSecurity instanceof utils.SpeakeasyBase)) { - globalSecurity = new shared.Security(globalSecurity); - } - const properties = utils.parseSecurityProperties(globalSecurity); - const headers: RawAxiosRequestHeaders = { ...config?.headers, ...properties.headers }; - headers["Accept"] = "application/json"; - - headers["user-agent"] = this.sdkConfiguration.userAgent; - - const httpRes: AxiosResponse = await client.request({ - validateStatus: () => true, - url: url, - method: 
"get", - headers: headers, - responseType: "arraybuffer", - ...config, - }); - - const contentType: string = httpRes?.headers?.["content-type"] ?? ""; - - if (httpRes?.status == null) { - throw new Error(`status code not found in response: ${httpRes}`); - } - - const res: operations.RetrieveModelResponse = new operations.RetrieveModelResponse({ - statusCode: httpRes.status, - contentType: contentType, - rawResponse: httpRes, - }); - const decodedRes = new TextDecoder().decode(httpRes?.data); - switch (true) { - case httpRes?.status == 200: - if (utils.matchContentType(contentType, `application/json`)) { - res.model = utils.objectToClass(JSON.parse(decodedRes), shared.Model); - } else { - throw new errors.SDKError( - "unknown content-type received: " + contentType, - httpRes.status, - decodedRes, - httpRes - ); - } - break; - } - - return res; - } -} diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index b22f7fc..897ec56 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -3,8 +3,18 @@ */ import * as utils from "../internal/utils"; +import { Audio } from "./audio"; +import { Chat } from "./chat"; +import { Completions } from "./completions"; +import { Edits } from "./edits"; +import { Embeddings } from "./embeddings"; +import { Files } from "./files"; +import { FineTunes } from "./finetunes"; +import { FineTuning } from "./finetuning"; +import { Images } from "./images"; +import { Models } from "./models"; import * as shared from "./models/shared"; -import { OpenAI } from "./openai"; +import { Moderations } from "./moderations"; import axios from "axios"; import { AxiosInstance } from "axios"; @@ -49,9 +59,9 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.26.0"; - genVersion = "2.150.0"; - userAgent = "speakeasy-sdk/typescript 2.26.0 2.150.0 2.0.0 @speakeasy-api/openai"; + sdkVersion = "2.26.1"; + genVersion = "2.152.1"; + userAgent = "speakeasy-sdk/typescript 2.26.1 2.152.1 2.0.0 
@speakeasy-api/openai"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); @@ -63,9 +73,49 @@ export class SDKConfiguration { */ export class Gpt { /** - * The OpenAI REST API + * Learn how to turn audio into text. */ - public openAI: OpenAI; + public audio: Audio; + /** + * Given a list of messages comprising a conversation, the model will return a response. + */ + public chat: Chat; + /** + * Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. + */ + public completions: Completions; + /** + * Given a prompt and an instruction, the model will return an edited version of the prompt. + */ + public edits: Edits; + /** + * Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. + */ + public embeddings: Embeddings; + /** + * Files are used to upload documents that can be used with features like fine-tuning. + */ + public files: Files; + /** + * Manage legacy fine-tuning jobs to tailor a model to your specific training data. + */ + public fineTunes: FineTunes; + /** + * Manage fine-tuning jobs to tailor a model to your specific training data. + */ + public fineTuning: FineTuning; + /** + * Given a prompt and/or an input image, the model will generate a new image. + */ + public images: Images; + /** + * List and describe the various models available in the API. + */ + public models: Models; + /** + * Given a input text, outputs if the model classifies it as violating OpenAI's content policy. 
+ */ + public moderations: Moderations; private sdkConfiguration: SDKConfiguration; @@ -85,6 +135,16 @@ export class Gpt { retryConfig: props?.retryConfig, }); - this.openAI = new OpenAI(this.sdkConfiguration); + this.audio = new Audio(this.sdkConfiguration); + this.chat = new Chat(this.sdkConfiguration); + this.completions = new Completions(this.sdkConfiguration); + this.edits = new Edits(this.sdkConfiguration); + this.embeddings = new Embeddings(this.sdkConfiguration); + this.files = new Files(this.sdkConfiguration); + this.fineTunes = new FineTunes(this.sdkConfiguration); + this.fineTuning = new FineTuning(this.sdkConfiguration); + this.images = new Images(this.sdkConfiguration); + this.models = new Models(this.sdkConfiguration); + this.moderations = new Moderations(this.sdkConfiguration); } } From d635d4fbccac61fee1ac0dbbb7bb8dbec46a072b Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Fri, 20 Oct 2023 00:55:29 +0000 Subject: [PATCH 64/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.101.0 --- README.md | 2 +- RELEASES.md | 12 +++++++++++- USAGE.md | 2 +- docs/models/shared/createembeddingrequest.md | 1 + .../createembeddingrequestencodingformat.md | 11 +++++++++++ docs/sdks/audio/README.md | 4 ++-- docs/sdks/chat/README.md | 16 ++++++++-------- docs/sdks/embeddings/README.md | 2 ++ docs/sdks/files/README.md | 10 +++++----- docs/sdks/images/README.md | 2 +- files.gen | 1 + gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/sdk/models/shared/createembeddingrequest.ts | 15 +++++++++++++++ src/sdk/sdk.ts | 6 +++--- 16 files changed, 69 insertions(+), 29 deletions(-) create mode 100755 docs/models/shared/createembeddingrequestencodingformat.md diff --git a/README.md b/README.md index 4f3f49e..7179fb5 100755 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ import { CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/ const res = await sdk.audio.createTranscription({ file: { content: "\#BbTW'zX9" as bytes <<<>>>, - 
file: "Buckinghamshire", + file: "green", }, model: "whisper-1", }); diff --git a/RELEASES.md b/RELEASES.md index fe536d0..f33553f 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -710,4 +710,14 @@ Based on: ### Generated - [typescript v2.26.1] . ### Releases -- [NPM v2.26.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.26.1 - . \ No newline at end of file +- [NPM v2.26.1] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.26.1 - . + +## 2023-10-20 00:55:06 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.101.0 (2.161.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.26.2] . +### Releases +- [NPM v2.26.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.26.2 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index f9ff825..b73da84 100755 --- a/USAGE.md +++ b/USAGE.md @@ -15,7 +15,7 @@ import { CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/ const res = await sdk.audio.createTranscription({ file: { content: "\#BbTW'zX9" as bytes <<<>>>, - file: "Buckinghamshire", + file: "green", }, model: "whisper-1", }); diff --git a/docs/models/shared/createembeddingrequest.md b/docs/models/shared/createembeddingrequest.md index b780407..829e45b 100755 --- a/docs/models/shared/createembeddingrequest.md +++ b/docs/models/shared/createembeddingrequest.md @@ -5,6 +5,7 @@ | Field | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `encodingFormat` | [CreateEmbeddingRequestEncodingFormat](../../models/shared/createembeddingrequestencodingformat.md) | :heavy_minus_sign: | The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). 
| float | | `input` | *any* | :heavy_check_mark: | Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
| The quick brown fox jumped over the lazy dog | | `model` | *any* | :heavy_check_mark: | ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
| text-embedding-ada-002 | | `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | \ No newline at end of file diff --git a/docs/models/shared/createembeddingrequestencodingformat.md b/docs/models/shared/createembeddingrequestencodingformat.md new file mode 100755 index 0000000..df614a6 --- /dev/null +++ b/docs/models/shared/createembeddingrequestencodingformat.md @@ -0,0 +1,11 @@ +# CreateEmbeddingRequestEncodingFormat + +The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). + + +## Values + +| Name | Value | +| -------- | -------- | +| `Float` | float | +| `Base64` | base64 | \ No newline at end of file diff --git a/docs/sdks/audio/README.md b/docs/sdks/audio/README.md index 0241115..8cd1ce3 100755 --- a/docs/sdks/audio/README.md +++ b/docs/sdks/audio/README.md @@ -30,7 +30,7 @@ import { CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/ const res = await sdk.audio.createTranscription({ file: { content: "\#BbTW'zX9" as bytes <<<>>>, - file: "Buckinghamshire", + file: "green", }, model: "whisper-1", }); @@ -73,7 +73,7 @@ import { Gpt } from "@speakeasy-api/openai"; const res = await sdk.audio.createTranslation({ file: { content: "M57UL;W3rx" as bytes <<<>>>, - file: "Reggae Toys silver", + file: "Bicycle", }, model: "whisper-1", }); diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index b0eed08..64d047d 100755 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -30,28 +30,28 @@ import { ChatCompletionRequestMessageRole } from "@speakeasy-api/openai/dist/sdk functionCall: "Hybrid", functions: [ { - name: "Hoboken reinvent Web", + name: "Diesel", parameters: { - "Southeast": "International", + "Money": "Web", }, }, ], logitBias: { - "incidunt": 432116, + "Southeast": 652538, }, messages: [ { - content: "abbreviate", + content: "Planner", functionCall: { - arguments: "Directives Chair", - name: "Northeast frictionless Park", + arguments: "Modern", + name: "alarm", }, - role: ChatCompletionRequestMessageRole.Assistant, + role: 
ChatCompletionRequestMessageRole.System, }, ], model: "gpt-3.5-turbo", n: 1, - stop: "Future", + stop: "Chair", temperature: 1, topP: 1, user: "user-1234", diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index d49752b..0d1a6cb 100755 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -17,6 +17,7 @@ Creates an embedding vector representing the input text. ```typescript import { Gpt } from "@speakeasy-api/openai"; +import { CreateEmbeddingRequestEncodingFormat } from "@speakeasy-api/openai/dist/sdk/models/shared"; (async() => { const sdk = new Gpt({ @@ -26,6 +27,7 @@ import { Gpt } from "@speakeasy-api/openai"; }); const res = await sdk.embeddings.createEmbedding({ + encodingFormat: CreateEmbeddingRequestEncodingFormat.Float, input: "The quick brown fox jumped over the lazy dog", model: "text-embedding-ada-002", user: "user-1234", diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 254f81c..7cb0a5e 100755 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -34,9 +34,9 @@ import { Gpt } from "@speakeasy-api/openai"; const res = await sdk.files.createFile({ file: { content: "`'$Z`(L/RH" as bytes <<<>>>, - file: "Rap National", + file: "Persevering", }, - purpose: "Female synergistic Maine", + purpose: "produce", }); if (res.statusCode == 200) { @@ -75,7 +75,7 @@ import { Gpt } from "@speakeasy-api/openai"; }); const res = await sdk.files.deleteFile({ - fileId: "yellow kiddingly white", + fileId: "Porsche", }); if (res.statusCode == 200) { @@ -114,7 +114,7 @@ import { Gpt } from "@speakeasy-api/openai"; }); const res = await sdk.files.downloadFile({ - fileId: "Maserati Bronze Audi", + fileId: "gosh", }); if (res.statusCode == 200) { @@ -189,7 +189,7 @@ import { Gpt } from "@speakeasy-api/openai"; }); const res = await sdk.files.retrieveFile({ - fileId: "online Facilitator enfold", + fileId: "Developer", }); if (res.statusCode == 200) { diff --git a/docs/sdks/images/README.md 
b/docs/sdks/images/README.md index 0f3d3b1..46d6b5e 100755 --- a/docs/sdks/images/README.md +++ b/docs/sdks/images/README.md @@ -79,7 +79,7 @@ import { CreateImageEditRequestResponseFormat, CreateImageEditRequestSize } from }, mask: { content: "`^YjrpxopK" as bytes <<<>>>, - mask: "Rap Dodge Incredible", + mask: "plum", }, n: 1, prompt: "A cute baby sea otter wearing a beret", diff --git a/files.gen b/files.gen index 1099008..385b95b 100755 --- a/files.gen +++ b/files.gen @@ -174,6 +174,7 @@ docs/models/shared/createeditrequest.md docs/models/shared/createembeddingresponseusage.md docs/models/shared/createembeddingresponse.md docs/models/shared/embedding.md +docs/models/shared/createembeddingrequestencodingformat.md docs/models/shared/createembeddingrequest.md docs/models/shared/openaifile.md docs/models/shared/createfilerequestfile.md diff --git a/gen.yaml b/gen.yaml index 1c5aef2..a034ad7 100644 --- a/gen.yaml +++ b/gen.yaml @@ -1,9 +1,9 @@ configVersion: 1.0.0 management: - docChecksum: d8810f26858855d4c1e35d5ef57d39ad + docChecksum: b421f9981e573fbdad69da1d6fbac49f docVersion: 2.0.0 - speakeasyVersion: 1.97.1 - generationVersion: 2.152.1 + speakeasyVersion: 1.101.0 + generationVersion: 2.161.0 generation: sdkClassName: gpt sdkFlattening: true @@ -16,7 +16,7 @@ features: globalSecurity: 2.82.0 globalServerURLs: 2.82.0 typescript: - version: 2.26.1 + version: 2.26.2 author: speakeasy-openai flattenGlobalSecurity: false maxMethodParams: 0 diff --git a/package-lock.json b/package-lock.json index 4105cea..0ecb116 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.26.1", + "version": "2.26.2", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.26.1", + "version": "2.26.2", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 793b233..2079459 100755 --- a/package.json +++ 
b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.26.1", + "version": "2.26.2", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build", diff --git a/src/sdk/models/shared/createembeddingrequest.ts b/src/sdk/models/shared/createembeddingrequest.ts index 4b49204..92751cb 100755 --- a/src/sdk/models/shared/createembeddingrequest.ts +++ b/src/sdk/models/shared/createembeddingrequest.ts @@ -5,7 +5,22 @@ import { SpeakeasyBase, SpeakeasyMetadata } from "../../../internal/utils"; import { Expose } from "class-transformer"; +/** + * The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). + */ +export enum CreateEmbeddingRequestEncodingFormat { + Float = "float", + Base64 = "base64", +} + export class CreateEmbeddingRequest extends SpeakeasyBase { + /** + * The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). + */ + @SpeakeasyMetadata() + @Expose({ name: "encoding_format" }) + encodingFormat?: CreateEmbeddingRequestEncodingFormat; + /** * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. 
* diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 897ec56..dd68b04 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -59,9 +59,9 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.26.1"; - genVersion = "2.152.1"; - userAgent = "speakeasy-sdk/typescript 2.26.1 2.152.1 2.0.0 @speakeasy-api/openai"; + sdkVersion = "2.26.2"; + genVersion = "2.161.0"; + userAgent = "speakeasy-sdk/typescript 2.26.2 2.161.0 2.0.0 @speakeasy-api/openai"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From 85cf092b916403ec1a60a386e1a0a1cdf161bc24 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sat, 21 Oct 2023 00:54:18 +0000 Subject: [PATCH 65/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.104.0 --- README.md | 2 +- RELEASES.md | 12 +++++++++++- USAGE.md | 2 +- docs/sdks/audio/README.md | 4 ++-- docs/sdks/chat/README.md | 18 +++++++++--------- docs/sdks/completions/README.md | 8 ++++---- docs/sdks/files/README.md | 10 +++++----- docs/sdks/finetunes/README.md | 2 +- docs/sdks/finetuning/README.md | 2 +- docs/sdks/images/README.md | 2 +- docs/sdks/moderations/README.md | 2 +- gen.yaml | 12 ++++++++---- package-lock.json | 4 ++-- package.json | 2 +- src/internal/utils/queryparams.ts | 8 ++++---- .../shared/createtranscriptionrequest.ts | 2 +- .../models/shared/createtranslationrequest.ts | 2 +- src/sdk/sdk.ts | 6 +++--- 18 files changed, 57 insertions(+), 43 deletions(-) diff --git a/README.md b/README.md index 7179fb5..6b943e1 100755 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ import { CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/ const res = await sdk.audio.createTranscription({ file: { content: "\#BbTW'zX9" as bytes <<<>>>, - file: "green", + file: "string", }, model: "whisper-1", }); diff --git a/RELEASES.md b/RELEASES.md index f33553f..1b4e46c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -720,4 +720,14 @@ Based 
on: ### Generated - [typescript v2.26.2] . ### Releases -- [NPM v2.26.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.26.2 - . \ No newline at end of file +- [NPM v2.26.2] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.26.2 - . + +## 2023-10-21 00:53:53 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.104.0 (2.169.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.27.0] . +### Releases +- [NPM v2.27.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.27.0 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index b73da84..2ce590c 100755 --- a/USAGE.md +++ b/USAGE.md @@ -15,7 +15,7 @@ import { CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/ const res = await sdk.audio.createTranscription({ file: { content: "\#BbTW'zX9" as bytes <<<>>>, - file: "green", + file: "string", }, model: "whisper-1", }); diff --git a/docs/sdks/audio/README.md b/docs/sdks/audio/README.md index 8cd1ce3..b3fe369 100755 --- a/docs/sdks/audio/README.md +++ b/docs/sdks/audio/README.md @@ -30,7 +30,7 @@ import { CreateTranscriptionRequestResponseFormat } from "@speakeasy-api/openai/ const res = await sdk.audio.createTranscription({ file: { content: "\#BbTW'zX9" as bytes <<<>>>, - file: "green", + file: "string", }, model: "whisper-1", }); @@ -73,7 +73,7 @@ import { Gpt } from "@speakeasy-api/openai"; const res = await sdk.audio.createTranslation({ file: { content: "M57UL;W3rx" as bytes <<<>>>, - file: "Bicycle", + file: "string", }, model: "whisper-1", }); diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 64d047d..d1b40da 100755 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -27,31 +27,31 @@ import { ChatCompletionRequestMessageRole } from "@speakeasy-api/openai/dist/sdk }); const res = await sdk.chat.createChatCompletion({ - functionCall: "Hybrid", + functionCall: "string", 
functions: [ { - name: "Diesel", + name: "string", parameters: { - "Money": "Web", + "key": "string", }, }, ], logitBias: { - "Southeast": 652538, + "key": 770726, }, messages: [ { - content: "Planner", + content: "string", functionCall: { - arguments: "Modern", - name: "alarm", + arguments: "string", + name: "string", }, - role: ChatCompletionRequestMessageRole.System, + role: ChatCompletionRequestMessageRole.Assistant, }, ], model: "gpt-3.5-turbo", n: 1, - stop: "Chair", + stop: "string", temperature: 1, topP: 1, user: "user-1234", diff --git a/docs/sdks/completions/README.md b/docs/sdks/completions/README.md index bb78a63..79eab0a 100755 --- a/docs/sdks/completions/README.md +++ b/docs/sdks/completions/README.md @@ -27,13 +27,13 @@ import { Gpt } from "@speakeasy-api/openai"; const res = await sdk.completions.createCompletion({ logitBias: { - "red": 242695, + "key": 160667, }, maxTokens: 16, - model: "Fresh", + model: "string", n: 1, - prompt: "Reggae", - stop: "Fluorine", + prompt: "string", + stop: "string", suffix: "test.", temperature: 1, topP: 1, diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 7cb0a5e..9348333 100755 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -34,9 +34,9 @@ import { Gpt } from "@speakeasy-api/openai"; const res = await sdk.files.createFile({ file: { content: "`'$Z`(L/RH" as bytes <<<>>>, - file: "Persevering", + file: "string", }, - purpose: "produce", + purpose: "string", }); if (res.statusCode == 200) { @@ -75,7 +75,7 @@ import { Gpt } from "@speakeasy-api/openai"; }); const res = await sdk.files.deleteFile({ - fileId: "Porsche", + fileId: "string", }); if (res.statusCode == 200) { @@ -114,7 +114,7 @@ import { Gpt } from "@speakeasy-api/openai"; }); const res = await sdk.files.downloadFile({ - fileId: "gosh", + fileId: "string", }); if (res.statusCode == 200) { @@ -189,7 +189,7 @@ import { Gpt } from "@speakeasy-api/openai"; }); const res = await sdk.files.retrieveFile({ - fileId: 
"Developer", + fileId: "string", }); if (res.statusCode == 200) { diff --git a/docs/sdks/finetunes/README.md b/docs/sdks/finetunes/README.md index dfa4760..8f4e9d8 100755 --- a/docs/sdks/finetunes/README.md +++ b/docs/sdks/finetunes/README.md @@ -97,7 +97,7 @@ import { Gpt } from "@speakeasy-api/openai"; 2, ], hyperparameters: { - nEpochs: "plum", + nEpochs: "string", }, model: "curie", trainingFile: "file-abc123", diff --git a/docs/sdks/finetuning/README.md b/docs/sdks/finetuning/README.md index 3cbfb74..8207ed4 100755 --- a/docs/sdks/finetuning/README.md +++ b/docs/sdks/finetuning/README.md @@ -87,7 +87,7 @@ import { Gpt } from "@speakeasy-api/openai"; const res = await sdk.fineTuning.createFineTuningJob({ hyperparameters: { - nEpochs: "empower", + nEpochs: "string", }, model: "gpt-3.5-turbo", trainingFile: "file-abc123", diff --git a/docs/sdks/images/README.md b/docs/sdks/images/README.md index 46d6b5e..bdd7946 100755 --- a/docs/sdks/images/README.md +++ b/docs/sdks/images/README.md @@ -79,7 +79,7 @@ import { CreateImageEditRequestResponseFormat, CreateImageEditRequestSize } from }, mask: { content: "`^YjrpxopK" as bytes <<<>>>, - mask: "plum", + mask: "string", }, n: 1, prompt: "A cute baby sea otter wearing a beret", diff --git a/docs/sdks/moderations/README.md b/docs/sdks/moderations/README.md index 699ca4d..22a5140 100755 --- a/docs/sdks/moderations/README.md +++ b/docs/sdks/moderations/README.md @@ -26,7 +26,7 @@ import { Gpt } from "@speakeasy-api/openai"; }); const res = await sdk.moderations.createModeration({ - input: "stable", + input: "string", model: "text-moderation-stable", }); diff --git a/gen.yaml b/gen.yaml index a034ad7..51d0948 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,22 +2,26 @@ configVersion: 1.0.0 management: docChecksum: b421f9981e573fbdad69da1d6fbac49f docVersion: 2.0.0 - speakeasyVersion: 1.101.0 - generationVersion: 2.161.0 + speakeasyVersion: 1.104.0 + generationVersion: 2.169.0 generation: + repoURL: 
https://github.com/speakeasy-sdks/openai-ts-sdk.git sdkClassName: gpt sdkFlattening: true singleTagPerOp: false telemetryEnabled: false features: typescript: - core: 2.90.4 + core: 2.93.0 deprecations: 2.81.1 globalSecurity: 2.82.0 globalServerURLs: 2.82.0 typescript: - version: 2.26.2 + version: 2.27.0 author: speakeasy-openai flattenGlobalSecurity: false + installationURL: https://github.com/speakeasy-sdks/openai-ts-sdk maxMethodParams: 0 packageName: '@speakeasy-api/openai' + published: true + repoSubDirectory: . diff --git a/package-lock.json b/package-lock.json index 0ecb116..a26f319 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.26.2", + "version": "2.27.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@speakeasy-api/openai", - "version": "2.26.2", + "version": "2.27.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 2079459..361293c 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.26.2", + "version": "2.27.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build", diff --git a/src/internal/utils/queryparams.ts b/src/internal/utils/queryparams.ts index 90f5653..0279e00 100755 --- a/src/internal/utils/queryparams.ts +++ b/src/internal/utils/queryparams.ts @@ -135,7 +135,8 @@ function noExplodeSerializer(params: Record, delimiter = ","): stri if (qpDecorator == null) return; - return `${paramKey}${delimiter}${valToString(value[paramKey])}`; + const key = qpDecorator.ParamName || paramKey + return `${key}${delimiter}${valToString(value[paramKey])}`; }) .join(delimiter); query.push(`${key}=${encodeURIComponent(values)}`); @@ -177,9 +178,8 @@ function formSerializerExplode(params: Record): string { if (qpDecorator == null) return; - return `${paramKey}=${encodeURIComponent( - valToString(value[paramKey]) - )}`; + const key 
= qpDecorator.ParamName || paramKey; + return `${key}=${encodeURIComponent(valToString(value[paramKey]))}`; }) .join("&") ); diff --git a/src/sdk/models/shared/createtranscriptionrequest.ts b/src/sdk/models/shared/createtranscriptionrequest.ts index c547668..0dae48e 100755 --- a/src/sdk/models/shared/createtranscriptionrequest.ts +++ b/src/sdk/models/shared/createtranscriptionrequest.ts @@ -51,7 +51,7 @@ export class CreateTranscriptionRequest extends SpeakeasyBase { * @remarks * */ - @SpeakeasyMetadata({ data: "multipart_form, name=model;json=true" }) + @SpeakeasyMetadata({ data: "multipart_form, name=model" }) model: any; /** diff --git a/src/sdk/models/shared/createtranslationrequest.ts b/src/sdk/models/shared/createtranslationrequest.ts index 4c95f6e..48d5752 100755 --- a/src/sdk/models/shared/createtranslationrequest.ts +++ b/src/sdk/models/shared/createtranslationrequest.ts @@ -28,7 +28,7 @@ export class CreateTranslationRequest extends SpeakeasyBase { * @remarks * */ - @SpeakeasyMetadata({ data: "multipart_form, name=model;json=true" }) + @SpeakeasyMetadata({ data: "multipart_form, name=model" }) model: any; /** diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index dd68b04..14f79be 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -59,9 +59,9 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.26.2"; - genVersion = "2.161.0"; - userAgent = "speakeasy-sdk/typescript 2.26.2 2.161.0 2.0.0 @speakeasy-api/openai"; + sdkVersion = "2.27.0"; + genVersion = "2.169.0"; + userAgent = "speakeasy-sdk/typescript 2.27.0 2.169.0 2.0.0 @speakeasy-api/openai"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init); From f6e60dc9c96ef8a7e8949e38c9d6030292df367d Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 25 Oct 2023 00:55:39 +0000 Subject: [PATCH 66/66] ci: regenerated with OpenAPI Doc 2.0.0, Speakeay CLI 1.107.0 --- RELEASES.md | 12 +++++++++++- 
gen.yaml | 8 ++++---- package-lock.json | 4 ++-- package.json | 2 +- src/internal/utils/security.ts | 2 +- src/sdk/sdk.ts | 6 +++--- 6 files changed, 22 insertions(+), 12 deletions(-) diff --git a/RELEASES.md b/RELEASES.md index 1b4e46c..58fb3d0 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -730,4 +730,14 @@ Based on: ### Generated - [typescript v2.27.0] . ### Releases -- [NPM v2.27.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.27.0 - . \ No newline at end of file +- [NPM v2.27.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.27.0 - . + +## 2023-10-25 00:55:14 +### Changes +Based on: +- OpenAPI Doc 2.0.0 https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml +- Speakeasy CLI 1.107.0 (2.171.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [typescript v2.28.0] . +### Releases +- [NPM v2.28.0] https://www.npmjs.com/package/@speakeasy-api/openai/v/2.28.0 - . \ No newline at end of file diff --git a/gen.yaml b/gen.yaml index 51d0948..4196e58 100644 --- a/gen.yaml +++ b/gen.yaml @@ -2,8 +2,8 @@ configVersion: 1.0.0 management: docChecksum: b421f9981e573fbdad69da1d6fbac49f docVersion: 2.0.0 - speakeasyVersion: 1.104.0 - generationVersion: 2.169.0 + speakeasyVersion: 1.107.0 + generationVersion: 2.171.0 generation: repoURL: https://github.com/speakeasy-sdks/openai-ts-sdk.git sdkClassName: gpt @@ -12,12 +12,12 @@ generation: telemetryEnabled: false features: typescript: - core: 2.93.0 + core: 2.94.0 deprecations: 2.81.1 globalSecurity: 2.82.0 globalServerURLs: 2.82.0 typescript: - version: 2.27.0 + version: 2.28.0 author: speakeasy-openai flattenGlobalSecurity: false installationURL: https://github.com/speakeasy-sdks/openai-ts-sdk diff --git a/package-lock.json b/package-lock.json index a26f319..6fdda9e 100755 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@speakeasy-api/openai", - "version": "2.27.0", + "version": "2.28.0", "lockfileVersion": 2, "requires": true, "packages": { "": { 
"name": "@speakeasy-api/openai", - "version": "2.27.0", + "version": "2.28.0", "dependencies": { "axios": "^1.1.3", "class-transformer": "^0.5.1", diff --git a/package.json b/package.json index 361293c..b4aaf00 100755 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@speakeasy-api/openai", - "version": "2.27.0", + "version": "2.28.0", "author": "speakeasy-openai", "scripts": { "prepare": "tsc --build", diff --git a/src/internal/utils/security.ts b/src/internal/utils/security.ts index 8f183b4..5071b33 100755 --- a/src/internal/utils/security.ts +++ b/src/internal/utils/security.ts @@ -179,7 +179,7 @@ function parseSecuritySchemeValue( properties.headers[securityDecorator.Name] = value; break; case "oauth2": - properties.headers[securityDecorator.Name] = value; + properties.headers[securityDecorator.Name] = value.toLowerCase().startsWith("bearer ") ? value : `Bearer ${value}`; break; case "http": switch (schemeDecorator.SubType) { diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 14f79be..e847454 100755 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -59,9 +59,9 @@ export class SDKConfiguration { serverDefaults: any; language = "typescript"; openapiDocVersion = "2.0.0"; - sdkVersion = "2.27.0"; - genVersion = "2.169.0"; - userAgent = "speakeasy-sdk/typescript 2.27.0 2.169.0 2.0.0 @speakeasy-api/openai"; + sdkVersion = "2.28.0"; + genVersion = "2.171.0"; + userAgent = "speakeasy-sdk/typescript 2.28.0 2.171.0 2.0.0 @speakeasy-api/openai"; retryConfig?: utils.RetryConfig; public constructor(init?: Partial) { Object.assign(this, init);