diff --git a/.eslintrc.js b/.eslintrc.js
new file mode 100644
index 000000000..60f0e7a35
--- /dev/null
+++ b/.eslintrc.js
@@ -0,0 +1,10 @@
+module.exports = {
+ parser: '@typescript-eslint/parser',
+ plugins: ['@typescript-eslint', 'unused-imports', 'prettier'],
+ rules: {
+ 'no-unused-vars': 'off',
+ 'prettier/prettier': 'error',
+ 'unused-imports/no-unused-imports': 'error',
+ },
+ root: true,
+};
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 5aaecc4ea..72fedbf5a 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -1,6 +1,6 @@
name: Bug report
description: Create a report to help us improve
-labels: ["bug"]
+labels: ['bug']
body:
- type: markdown
attributes:
@@ -53,4 +53,4 @@ body:
label: Library version
placeholder: openai v3.0.1
validations:
- required: true
\ No newline at end of file
+ required: true
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 9b58e06f0..6f6c3bfdb 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -4,4 +4,4 @@ contact_links:
url: https://help.openai.com/
about: |
Please only file issues here that you believe represent actual bugs or feature requests for the OpenAI Node library.
- If you're having general trouble with the OpenAI API, please visit our help center to get support.
\ No newline at end of file
+ If you're having general trouble with the OpenAI API, please visit our help center to get support.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
index c61321f7b..1be579964 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -1,6 +1,6 @@
name: Feature request
description: Suggest an idea for this library
-labels: ["feature-request"]
+labels: ['feature-request']
body:
- type: markdown
attributes:
@@ -17,4 +17,4 @@ body:
id: context
attributes:
label: Additional context
- description: Add any other context about the feature request here.
\ No newline at end of file
+ description: Add any other context about the feature request here.
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 8b1851595..ceaf21b5d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -2,24 +2,23 @@ name: Node.js CI
on:
push:
- branches: [ master ]
+ branches: [master]
pull_request:
- branches: [ master ]
+ branches: [master]
jobs:
build:
-
runs-on: ubuntu-latest
strategy:
matrix:
node-version: [16.x, 18.x]
steps:
- - uses: actions/checkout@v3
- - name: Use Node.js ${{ matrix.node-version }}
- uses: actions/setup-node@v3
- with:
- node-version: ${{ matrix.node-version }}
- cache: 'npm'
- - run: npm ci
- - run: npm run build
+ - uses: actions/checkout@v3
+ - name: Use Node.js ${{ matrix.node-version }}
+ uses: actions/setup-node@v3
+ with:
+ node-version: ${{ matrix.node-version }}
+ cache: 'npm'
+ - run: npm ci
+ - run: npm run build
diff --git a/.gitignore b/.gitignore
index 07e6e472c..314b24627 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,6 @@
-/node_modules
+node_modules
+yarn-error.log
+codegen.log
+dist
+/deno
+/*.tgz
diff --git a/.npmignore b/.npmignore
deleted file mode 100644
index 999d88df6..000000000
--- a/.npmignore
+++ /dev/null
@@ -1 +0,0 @@
-# empty npmignore to ensure all required files (e.g., in the dist folder) are published by npm
\ No newline at end of file
diff --git a/.openapi-generator-ignore b/.openapi-generator-ignore
deleted file mode 100644
index ce0eef51f..000000000
--- a/.openapi-generator-ignore
+++ /dev/null
@@ -1,5 +0,0 @@
-.gitignore
-README.md
-git_push.sh
-package.json
-package-lock.json
\ No newline at end of file
diff --git a/.openapi-generator/FILES b/.openapi-generator/FILES
deleted file mode 100644
index daef44100..000000000
--- a/.openapi-generator/FILES
+++ /dev/null
@@ -1,6 +0,0 @@
-.npmignore
-api.ts
-base.ts
-common.ts
-configuration.ts
-index.ts
diff --git a/.openapi-generator/VERSION b/.openapi-generator/VERSION
deleted file mode 100644
index cd802a1ec..000000000
--- a/.openapi-generator/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-6.6.0
\ No newline at end of file
diff --git a/.prettierignore b/.prettierignore
new file mode 100644
index 000000000..804a75c60
--- /dev/null
+++ b/.prettierignore
@@ -0,0 +1,4 @@
+CHANGELOG.md
+/ecosystem-tests
+/node_modules
+/deno
diff --git a/.prettierrc b/.prettierrc
new file mode 100644
index 000000000..6f72f437c
--- /dev/null
+++ b/.prettierrc
@@ -0,0 +1,6 @@
+{
+ "arrowParens": "always",
+ "trailingComma": "all",
+ "singleQuote": true,
+ "printWidth": 110
+}
diff --git a/.stats.yml b/.stats.yml
new file mode 100644
index 000000000..c125dfb22
--- /dev/null
+++ b/.stats.yml
@@ -0,0 +1 @@
+configured_endpoints: 23
diff --git a/LICENSE b/LICENSE
index 4f14854c3..7b1b36a64 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,21 +1,201 @@
-The MIT License
-
-Copyright (c) OpenAI (https://openai.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2023 OpenAI
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
index 765d73647..1c139cab7 100644
--- a/README.md
+++ b/README.md
@@ -1,89 +1,278 @@
-# OpenAI Node.js Library
+# OpenAI Node API Library
-The OpenAI Node.js library provides convenient access to the OpenAI API from Node.js applications. Most of the code in this library is generated from our [OpenAPI specification](https://github.com/openai/openai-openapi).
+[![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai)
-> ⚠️ **Important note: this library is meant for server-side usage only, as using it in client-side browser code will expose your secret API key. [See here](https://platform.openai.com/docs/api-reference/authentication) for more details.**
+This library provides convenient access to the OpenAI REST API from TypeScript or JavaScript.
+
+It is generated from our [OpenAPI specification](https://github.com/openai/openai-openapi) with [Stainless](https://stainlessapi.com/).
+
+To learn how to use the OpenAI API, check out our [API Reference](https://platform.openai.com/docs/api-reference) and [Documentation](https://platform.openai.com/docs).
## Installation
-```bash
-npm install openai
+```sh
+npm install --save openai
+# or
+yarn add openai
```
## Usage
-The library needs to be configured with your account's secret key, which is available in your [OpenAI account page](https://platform.openai.com/account/api-keys). We recommend setting it as an environment variable. Here's an example of initializing the library with the API key loaded from an environment variable and creating a completion:
+> [!IMPORTANT]
+> Previous versions of this SDK used a `Configuration` class. See the [v3 to v4 migration guide](https://github.com/openai/openai-node/discussions/217).
-```javascript
-const { Configuration, OpenAIApi } = require("openai");
+```js
+import OpenAI from 'openai';
-const configuration = new Configuration({
- apiKey: process.env.OPENAI_API_KEY,
+const openai = new OpenAI({
+ apiKey: 'my api key', // defaults to process.env["OPENAI_API_KEY"]
});
-const openai = new OpenAIApi(configuration);
-const chatCompletion = await openai.createChatCompletion({
- model: "gpt-3.5-turbo",
- messages: [{role: "user", content: "Hello world"}],
-});
-console.log(chatCompletion.data.choices[0].message);
+async function main() {
+ const completion = await openai.chat.completions.create({
+ messages: [{ role: 'user', content: 'Say this is a test' }],
+ model: 'gpt-3.5-turbo',
+ });
+
+ console.log(completion.choices);
+}
+
+main();
```
-Check out the [full API documentation](https://platform.openai.com/docs/api-reference?lang=node.js) for examples of all the available functions.
+## Streaming Responses
+
+We provide support for streaming responses using Server-Sent Events (SSE).
-### Request options
+```ts
+import OpenAI from 'openai';
-All of the available API request functions additionally contain an optional final parameter where you can pass custom [axios request options](https://axios-http.com/docs/req_config), for example:
+const openai = new OpenAI();
-```javascript
-const completion = await openai.createCompletion(
- {
- model: "text-davinci-003",
- prompt: "Hello world",
- },
- {
- timeout: 1000,
- headers: {
- "Example-Header": "example",
- },
+async function main() {
+ const stream = await openai.chat.completions.create({
+ model: 'gpt-4',
+ messages: [{ role: 'user', content: 'Say this is a test' }],
+ stream: true,
+ });
+ for await (const part of stream) {
+ process.stdout.write(part.choices[0]?.delta?.content || '');
}
-);
+}
+
+main();
```
-### Error handling
+If you need to cancel a stream, you can `break` from the loop
+or call `stream.controller.abort()`.
-API requests can potentially return errors due to invalid inputs or other issues. These errors can be handled with a `try...catch` statement, and the error details can be found in either `error.response` or `error.message`:
+### Request & Response types
-```javascript
-try {
- const completion = await openai.createCompletion({
- model: "text-davinci-003",
- prompt: "Hello world",
- });
- console.log(completion.data.choices[0].text);
-} catch (error) {
- if (error.response) {
- console.log(error.response.status);
- console.log(error.response.data);
- } else {
- console.log(error.message);
- }
+This library includes TypeScript definitions for all request params and response fields. You may import and use them like so:
+
+```ts
+import OpenAI from 'openai';
+
+const openai = new OpenAI({
+ apiKey: 'my api key', // defaults to process.env["OPENAI_API_KEY"]
+});
+
+async function main() {
+ const params: OpenAI.Chat.CompletionCreateParams = {
+ messages: [{ role: 'user', content: 'Say this is a test' }],
+ model: 'gpt-3.5-turbo',
+ };
+ const completion: OpenAI.Chat.ChatCompletion = await openai.chat.completions.create(params);
+}
+
+main();
+```
+
+Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors.
+
+## File Uploads
+
+Request parameters that correspond to file uploads can be passed in many different forms:
+
+- `File` (or an object with the same structure)
+- a `fetch` `Response` (or an object with the same structure)
+- an `fs.ReadStream`
+- the return value of our `toFile` helper
+
+```ts
+import fs from 'fs';
+import fetch from 'node-fetch';
+import OpenAI, { toFile } from 'openai';
+
+const openai = new OpenAI();
+
+// If you have access to Node `fs` we recommend using `fs.createReadStream()`:
+await openai.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });
+
+// Or if you have the web `File` API you can pass a `File` instance:
+await openai.files.create({ file: new File(['my bytes'], 'input.jsonl'), purpose: 'fine-tune' });
+
+// You can also pass a `fetch` `Response`:
+await openai.files.create({ file: await fetch('https://somesite/input.jsonl'), purpose: 'fine-tune' });
+
+// Finally, if none of the above are convenient, you can use our `toFile` helper:
+await openai.files.create({
+ file: await toFile(Buffer.from('my bytes'), 'input.jsonl'),
+ purpose: 'fine-tune',
+});
+await openai.files.create({
+ file: await toFile(new Uint8Array([0, 1, 2]), 'input.jsonl'),
+ purpose: 'fine-tune',
+});
+```
+
+## Handling errors
+
+When the library is unable to connect to the API,
+or if the API returns a non-success status code (i.e., 4xx or 5xx response),
+a subclass of `APIError` will be thrown:
+
+```ts
+async function main() {
+ const fineTune = await openai.fineTunes
+ .create({ training_file: 'file-XGinujblHPwGLSztz8cPS8XY' })
+ .catch((err) => {
+ if (err instanceof OpenAI.APIError) {
+ console.log(err.status); // 400
+ console.log(err.name); // BadRequestError
+
+ console.log(err.headers); // {server: 'nginx', ...}
+ } else {
+ throw err;
+ }
+ });
}
+
+main();
```
-### Streaming completions
+Error codes are as follows:
+
+| Status Code | Error Type |
+| ----------- | -------------------------- |
+| 400 | `BadRequestError` |
+| 401 | `AuthenticationError` |
+| 403 | `PermissionDeniedError` |
+| 404 | `NotFoundError` |
+| 422 | `UnprocessableEntityError` |
+| 429 | `RateLimitError` |
+| >=500 | `InternalServerError` |
+| N/A | `APIConnectionError` |
+
+### Retries
+
+Certain errors will be automatically retried 2 times by default, with a short exponential backoff.
+Connection errors (for example, due to a network connectivity problem), 409 Conflict, 429 Rate Limit,
+and >=500 Internal errors will all be retried by default.
+
+You can use the `maxRetries` option to configure or disable this:
+
+
+```js
+// Configure the default for all requests:
+const openai = new OpenAI({
+ maxRetries: 0, // default is 2
+});
+
+// Or, configure per-request:
+await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, {
+ maxRetries: 5,
+});
+```
+
+### Timeouts
+
+Requests time out after 10 minutes by default. You can configure this with a `timeout` option:
+
+
+```ts
+// Configure the default for all requests:
+const openai = new OpenAI({
+ timeout: 20 * 1000, // 20 seconds (default is 10 minutes)
+});
+
+// Override per-request:
+await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, {
+ timeout: 5 * 1000,
+});
+```
+
+On timeout, an `APIConnectionTimeoutError` is thrown.
+
+Note that requests which time out will be [retried twice by default](#retries).
+
+## Advanced Usage
+
+### Accessing raw Response data (e.g., headers)
+
+The "raw" `Response` returned by `fetch()` can be accessed through the `.asResponse()` method on the `APIPromise` type that all methods return.
+
+You can also use the `.withResponse()` method to get the raw `Response` along with the parsed data.
+
+```ts
+const openai = new OpenAI();
+
+const response = await openai.chat.completions
+ .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
+ .asResponse();
+console.log(response.headers.get('X-My-Header'));
+console.log(response.statusText); // access the underlying Response object
+
+const { data: completions, response: raw } = await openai.chat.completions
+ .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
+ .withResponse();
+console.log(raw.headers.get('X-My-Header'));
+console.log(completions.choices);
+```
+
+## Configuring an HTTP(S) Agent (e.g., for proxies)
+
+By default, this library uses a stable agent for all http/https requests to reuse TCP connections, eliminating many TCP & TLS handshakes and shaving around 100ms off most requests.
+
+If you would like to disable or customize this behavior, for example to use the API behind a proxy, you can pass an `httpAgent` which is used for all requests (be they http or https), for example:
+
+
+```ts
+import http from 'http';
+import HttpsProxyAgent from 'https-proxy-agent';
+
+// Configure the default for all requests:
+const openai = new OpenAI({
+ httpAgent: new HttpsProxyAgent(process.env.PROXY_URL),
+});
+
+// Override per-request:
+await openai.models.list({
+ baseURL: 'http://localhost:8080/test-api',
+ httpAgent: new http.Agent({ keepAlive: false }),
+})
+```
+
+## Semantic Versioning
+
+This package generally attempts to follow [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:
-Streaming completions (`stream=true`) are not natively supported in this package yet, but [a workaround exists](https://github.com/openai/openai-node/issues/18#issuecomment-1369996933) if needed.
+1. Changes that only affect static types, without breaking runtime behavior.
+2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_.
+3. Changes that we do not expect to impact the vast majority of users in practice.
-## Upgrade guide
+We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.
-All breaking changes for major version releases are listed below.
+We are keen for your feedback; please open an [issue](https://www.github.com/openai/openai-node/issues) with questions, bugs, or suggestions.
-### 3.0.0
+## Requirements
-- The function signature of `createCompletion(engineId, params)` changed to `createCompletion(params)`. The value previously passed in as the `engineId` argument should now be passed in as `model` in the params object (e.g. `createCompletion({ model: "text-davinci-003", ... })`)
-- Replace any `createCompletionFromModel(params)` calls with `createCompletion(params)`
+The following runtimes are supported:
-## Thanks
+- Node.js 16 LTS or later ([non-EOL](https://endoflife.date/nodejs)) versions.
+- Deno v1.28.0 or higher, using `import OpenAI from "npm:openai"`.
+ Deno Deploy is not yet supported.
+- Cloudflare Workers.
+- Vercel Edge Runtime.
-Thank you to [ceifa](https://github.com/ceifa) for creating and maintaining the original unofficial `openai` npm package before we released this official library! ceifa's original package has been renamed to [gpt-x](https://www.npmjs.com/package/gpt-x).
+If you are interested in other runtime environments, please open or upvote an issue on GitHub.
diff --git a/api.md b/api.md
new file mode 100644
index 000000000..722e833a3
--- /dev/null
+++ b/api.md
@@ -0,0 +1,138 @@
+# Completions
+
+Types:
+
+- Completion
+- CompletionChoice
+- CompletionUsage
+
+Methods:
+
+- client.completions.create ({ ...params }) -> Completion
+
+# Chat
+
+## Completions
+
+Types:
+
+- ChatCompletion
+- ChatCompletionChunk
+- ChatCompletionMessage
+- CreateChatCompletionRequestMessage
+
+Methods:
+
+- client.chat.completions.create ({ ...params }) -> ChatCompletion
+
+# Edits
+
+Types:
+
+- Edit
+
+Methods:
+
+- client.edits.create ({ ...params }) -> Edit
+
+# Embeddings
+
+Types:
+
+- CreateEmbeddingResponse
+- Embedding
+
+Methods:
+
+- client.embeddings.create ({ ...params }) -> CreateEmbeddingResponse
+
+# Files
+
+Types:
+
+- FileContent
+- FileDeleted
+- FileObject
+
+Methods:
+
+- client.files.create ({ ...params }) -> FileObject
+- client.files.retrieve (fileId) -> FileObject
+- client.files.list () -> FileObjectsPage
+- client.files.del (fileId) -> FileDeleted
+- client.files.retrieveContent (fileId) -> string
+
+# Images
+
+Types:
+
+- Image
+- ImagesResponse
+
+Methods:
+
+- client.images.createVariation ({ ...params }) -> ImagesResponse
+- client.images.edit ({ ...params }) -> ImagesResponse
+- client.images.generate ({ ...params }) -> ImagesResponse
+
+# Audio
+
+## Transcriptions
+
+Types:
+
+- Transcription
+
+Methods:
+
+- client.audio.transcriptions.create ({ ...params }) -> Transcription
+
+## Translations
+
+Types:
+
+- Translation
+
+Methods:
+
+- client.audio.translations.create ({ ...params }) -> Translation
+
+# Moderations
+
+Types:
+
+- Moderation
+- ModerationCreateResponse
+
+Methods:
+
+- client.moderations.create ({ ...params }) -> ModerationCreateResponse
+
+# Models
+
+Types:
+
+- Model
+- ModelDeleted
+
+Methods:
+
+- client.models.retrieve (model) -> Model
+- client.models.list () -> ModelsPage
+- client.models.del (model) -> ModelDeleted
+
+# FineTunes
+
+Types:
+
+- FineTune
+- FineTuneEvent
+- FineTuneEventsListResponse
+
+Methods:
+
+- client.fineTunes.create ({ ...params }) -> FineTune
+- client.fineTunes.retrieve (fineTuneId) -> FineTune
+- client.fineTunes.list () -> FineTunesPage
+- client.fineTunes.cancel (fineTuneId) -> FineTune
+- client.fineTunes.listEvents (fineTuneId, { ...params }) -> FineTuneEventsListResponse
diff --git a/api.ts b/api.ts
deleted file mode 100644
index 10505c0d4..000000000
--- a/api.ts
+++ /dev/null
@@ -1,4131 +0,0 @@
-/* tslint:disable */
-/* eslint-disable */
-/**
- * OpenAI API
- * APIs for sampling from and fine-tuning language models
- *
- * The version of the OpenAPI document: 1.3.0
- *
- *
- * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
- * https://openapi-generator.tech
- * Do not edit the class manually.
- */
-
-
-import type { Configuration } from './configuration';
-import type { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios';
-import globalAxios from 'axios';
-// Some imports not used depending on template conditions
-// @ts-ignore
-import { DUMMY_BASE_URL, assertParamExists, setApiKeyToObject, setBasicAuthToObject, setBearerAuthToObject, setOAuthToObject, setSearchParams, serializeDataIfNeeded, toPathString, createRequestFunction } from './common';
-import type { RequestArgs } from './base';
-// @ts-ignore
-import { BASE_PATH, COLLECTION_FORMATS, BaseAPI, RequiredError } from './base';
-
-/**
- *
- * @export
- * @interface ChatCompletionFunctions
- */
-export interface ChatCompletionFunctions {
- /**
- * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
- * @type {string}
- * @memberof ChatCompletionFunctions
- */
- 'name': string;
- /**
- * The description of what the function does.
- * @type {string}
- * @memberof ChatCompletionFunctions
- */
- 'description'?: string;
- /**
- * The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
- * @type {{ [key: string]: any; }}
- * @memberof ChatCompletionFunctions
- */
- 'parameters'?: { [key: string]: any; };
-}
-/**
- *
- * @export
- * @interface ChatCompletionRequestMessage
- */
-export interface ChatCompletionRequestMessage {
- /**
- * The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
- * @type {string}
- * @memberof ChatCompletionRequestMessage
- */
- 'role': ChatCompletionRequestMessageRoleEnum;
- /**
- * The contents of the message. `content` is required for all messages except assistant messages with function calls.
- * @type {string}
- * @memberof ChatCompletionRequestMessage
- */
- 'content'?: string;
- /**
- * The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
- * @type {string}
- * @memberof ChatCompletionRequestMessage
- */
- 'name'?: string;
- /**
- *
- * @type {ChatCompletionRequestMessageFunctionCall}
- * @memberof ChatCompletionRequestMessage
- */
- 'function_call'?: ChatCompletionRequestMessageFunctionCall;
-}
-
-export const ChatCompletionRequestMessageRoleEnum = {
- System: 'system',
- User: 'user',
- Assistant: 'assistant',
- Function: 'function'
-} as const;
-
-export type ChatCompletionRequestMessageRoleEnum = typeof ChatCompletionRequestMessageRoleEnum[keyof typeof ChatCompletionRequestMessageRoleEnum];
-
-/**
- * The name and arguments of a function that should be called, as generated by the model.
- * @export
- * @interface ChatCompletionRequestMessageFunctionCall
- */
-export interface ChatCompletionRequestMessageFunctionCall {
- /**
- * The name of the function to call.
- * @type {string}
- * @memberof ChatCompletionRequestMessageFunctionCall
- */
- 'name'?: string;
- /**
- * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
- * @type {string}
- * @memberof ChatCompletionRequestMessageFunctionCall
- */
- 'arguments'?: string;
-}
-/**
- *
- * @export
- * @interface ChatCompletionResponseMessage
- */
-export interface ChatCompletionResponseMessage {
- /**
- * The role of the author of this message.
- * @type {string}
- * @memberof ChatCompletionResponseMessage
- */
- 'role': ChatCompletionResponseMessageRoleEnum;
- /**
- * The contents of the message.
- * @type {string}
- * @memberof ChatCompletionResponseMessage
- */
- 'content'?: string;
- /**
- *
- * @type {ChatCompletionRequestMessageFunctionCall}
- * @memberof ChatCompletionResponseMessage
- */
- 'function_call'?: ChatCompletionRequestMessageFunctionCall;
-}
-
-export const ChatCompletionResponseMessageRoleEnum = {
- System: 'system',
- User: 'user',
- Assistant: 'assistant',
- Function: 'function'
-} as const;
-
-export type ChatCompletionResponseMessageRoleEnum = typeof ChatCompletionResponseMessageRoleEnum[keyof typeof ChatCompletionResponseMessageRoleEnum];
-
-/**
- *
- * @export
- * @interface CreateAnswerRequest
- */
-export interface CreateAnswerRequest {
- /**
- * ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'model': string;
- /**
- * Question to get answered.
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'question': string;
- /**
- * List of (question, answer) pairs that will help steer the model towards the tone and answer format you\'d like. We recommend adding 2 to 3 examples.
- * @type {Array}
- * @memberof CreateAnswerRequest
- */
- 'examples': Array;
- /**
- * A text snippet containing the contextual information used to generate the answers for the `examples` you provide.
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'examples_context': string;
- /**
- * List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. You should specify either `documents` or a `file`, but not both.
- * @type {Array}
- * @memberof CreateAnswerRequest
- */
- 'documents'?: Array | null;
- /**
- * The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `documents` or a `file`, but not both.
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'file'?: string | null;
- /**
- * ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'search_model'?: string | null;
- /**
- * The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost.
- * @type {number}
- * @memberof CreateAnswerRequest
- */
- 'max_rerank'?: number | null;
- /**
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
- * @type {number}
- * @memberof CreateAnswerRequest
- */
- 'temperature'?: number | null;
- /**
- * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
- * @type {number}
- * @memberof CreateAnswerRequest
- */
- 'logprobs'?: number | null;
- /**
- * The maximum number of tokens allowed for the generated answer
- * @type {number}
- * @memberof CreateAnswerRequest
- */
- 'max_tokens'?: number | null;
- /**
- *
- * @type {CreateAnswerRequestStop}
- * @memberof CreateAnswerRequest
- */
- 'stop'?: CreateAnswerRequestStop | null;
- /**
- * How many answers to generate for each question.
- * @type {number}
- * @memberof CreateAnswerRequest
- */
- 'n'?: number | null;
- /**
- * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
- * @type {object}
- * @memberof CreateAnswerRequest
- */
- 'logit_bias'?: object | null;
- /**
- * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
- * @type {boolean}
- * @memberof CreateAnswerRequest
- */
- 'return_metadata'?: boolean | null;
- /**
- * If set to `true`, the returned JSON will include a \"prompt\" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes.
- * @type {boolean}
- * @memberof CreateAnswerRequest
- */
- 'return_prompt'?: boolean | null;
- /**
- * If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion.
- * @type {Array}
- * @memberof CreateAnswerRequest
- */
- 'expand'?: Array | null;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'user'?: string;
-}
-/**
- * @type CreateAnswerRequestStop
- * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
- * @export
- */
-export type CreateAnswerRequestStop = Array | string;
-
-/**
- *
- * @export
- * @interface CreateAnswerResponse
- */
-export interface CreateAnswerResponse {
- /**
- *
- * @type {string}
- * @memberof CreateAnswerResponse
- */
- 'object'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateAnswerResponse
- */
- 'model'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateAnswerResponse
- */
- 'search_model'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateAnswerResponse
- */
- 'completion'?: string;
- /**
- *
- * @type {Array}
- * @memberof CreateAnswerResponse
- */
- 'answers'?: Array;
- /**
- *
- * @type {Array}
- * @memberof CreateAnswerResponse
- */
- 'selected_documents'?: Array;
-}
-/**
- *
- * @export
- * @interface CreateAnswerResponseSelectedDocumentsInner
- */
-export interface CreateAnswerResponseSelectedDocumentsInner {
- /**
- *
- * @type {number}
- * @memberof CreateAnswerResponseSelectedDocumentsInner
- */
- 'document'?: number;
- /**
- *
- * @type {string}
- * @memberof CreateAnswerResponseSelectedDocumentsInner
- */
- 'text'?: string;
-}
-/**
- *
- * @export
- * @interface CreateChatCompletionRequest
- */
-export interface CreateChatCompletionRequest {
- /**
- * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
- * @type {string}
- * @memberof CreateChatCompletionRequest
- */
- 'model': string;
- /**
- * A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
- * @type {Array}
- * @memberof CreateChatCompletionRequest
- */
- 'messages': Array;
- /**
- * A list of functions the model may generate JSON inputs for.
- * @type {Array}
- * @memberof CreateChatCompletionRequest
- */
- 'functions'?: Array;
- /**
- *
- * @type {CreateChatCompletionRequestFunctionCall}
- * @memberof CreateChatCompletionRequest
- */
- 'function_call'?: CreateChatCompletionRequestFunctionCall;
- /**
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'temperature'?: number | null;
- /**
- * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'top_p'?: number | null;
- /**
- * How many chat completion choices to generate for each input message.
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'n'?: number | null;
- /**
- * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
- * @type {boolean}
- * @memberof CreateChatCompletionRequest
- */
- 'stream'?: boolean | null;
- /**
- *
- * @type {CreateChatCompletionRequestStop}
- * @memberof CreateChatCompletionRequest
- */
- 'stop'?: CreateChatCompletionRequestStop;
- /**
- * The maximum number of [tokens](/tokenizer) to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model\'s context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'max_tokens'?: number;
- /**
- * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'presence_penalty'?: number | null;
- /**
- * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'frequency_penalty'?: number | null;
- /**
- * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
- * @type {object}
- * @memberof CreateChatCompletionRequest
- */
- 'logit_bias'?: object | null;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateChatCompletionRequest
- */
- 'user'?: string;
-}
-/**
- * @type CreateChatCompletionRequestFunctionCall
- * Controls how the model responds to function calls. \"none\" means the model does not call a function, and responds to the end-user. \"auto\" means the model can pick between an end-user or calling a function. Specifying a particular function via `{\"name\":\\ \"my_function\"}` forces the model to call that function. \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.
- * @export
- */
-export type CreateChatCompletionRequestFunctionCall = CreateChatCompletionRequestFunctionCallOneOf | string;
-
-/**
- *
- * @export
- * @interface CreateChatCompletionRequestFunctionCallOneOf
- */
-export interface CreateChatCompletionRequestFunctionCallOneOf {
- /**
- * The name of the function to call.
- * @type {string}
- * @memberof CreateChatCompletionRequestFunctionCallOneOf
- */
- 'name': string;
-}
-/**
- * @type CreateChatCompletionRequestStop
- * Up to 4 sequences where the API will stop generating further tokens.
- * @export
- */
-export type CreateChatCompletionRequestStop = Array | string;
-
-/**
- *
- * @export
- * @interface CreateChatCompletionResponse
- */
-export interface CreateChatCompletionResponse {
- /**
- *
- * @type {string}
- * @memberof CreateChatCompletionResponse
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof CreateChatCompletionResponse
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof CreateChatCompletionResponse
- */
- 'created': number;
- /**
- *
- * @type {string}
- * @memberof CreateChatCompletionResponse
- */
- 'model': string;
- /**
- *
- * @type {Array}
- * @memberof CreateChatCompletionResponse
- */
- 'choices': Array;
- /**
- *
- * @type {CreateCompletionResponseUsage}
- * @memberof CreateChatCompletionResponse
- */
- 'usage'?: CreateCompletionResponseUsage;
-}
-/**
- *
- * @export
- * @interface CreateChatCompletionResponseChoicesInner
- */
-export interface CreateChatCompletionResponseChoicesInner {
- /**
- *
- * @type {number}
- * @memberof CreateChatCompletionResponseChoicesInner
- */
- 'index'?: number;
- /**
- *
- * @type {ChatCompletionResponseMessage}
- * @memberof CreateChatCompletionResponseChoicesInner
- */
- 'message'?: ChatCompletionResponseMessage;
- /**
- *
- * @type {string}
- * @memberof CreateChatCompletionResponseChoicesInner
- */
- 'finish_reason'?: string;
-}
-/**
- *
- * @export
- * @interface CreateClassificationRequest
- */
-export interface CreateClassificationRequest {
- /**
- * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
- * @type {string}
- * @memberof CreateClassificationRequest
- */
- 'model': string;
- /**
- * Query to be classified.
- * @type {string}
- * @memberof CreateClassificationRequest
- */
- 'query': string;
- /**
- * A list of examples with labels, in the following format: `[[\"The movie is so interesting.\", \"Positive\"], [\"It is quite boring.\", \"Negative\"], ...]` All the label strings will be normalized to be capitalized. You should specify either `examples` or `file`, but not both.
- * @type {Array}
- * @memberof CreateClassificationRequest
- */
- 'examples'?: Array | null;
- /**
- * The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `examples` or `file`, but not both.
- * @type {string}
- * @memberof CreateClassificationRequest
- */
- 'file'?: string | null;
- /**
- * The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized.
- * @type {Array}
- * @memberof CreateClassificationRequest
- */
- 'labels'?: Array | null;
- /**
- * ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
- * @type {string}
- * @memberof CreateClassificationRequest
- */
- 'search_model'?: string | null;
- /**
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
- * @type {number}
- * @memberof CreateClassificationRequest
- */
- 'temperature'?: number | null;
- /**
- * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
- * @type {number}
- * @memberof CreateClassificationRequest
- */
- 'logprobs'?: number | null;
- /**
- * The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost.
- * @type {number}
- * @memberof CreateClassificationRequest
- */
- 'max_examples'?: number | null;
- /**
- * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
- * @type {object}
- * @memberof CreateClassificationRequest
- */
- 'logit_bias'?: object | null;
- /**
- * If set to `true`, the returned JSON will include a \"prompt\" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes.
- * @type {boolean}
- * @memberof CreateClassificationRequest
- */
- 'return_prompt'?: boolean | null;
- /**
- * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
- * @type {boolean}
- * @memberof CreateClassificationRequest
- */
- 'return_metadata'?: boolean | null;
- /**
- * If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion.
- * @type {Array}
- * @memberof CreateClassificationRequest
- */
- 'expand'?: Array | null;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateClassificationRequest
- */
- 'user'?: string;
-}
-/**
- *
- * @export
- * @interface CreateClassificationResponse
- */
-export interface CreateClassificationResponse {
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponse
- */
- 'object'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponse
- */
- 'model'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponse
- */
- 'search_model'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponse
- */
- 'completion'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponse
- */
- 'label'?: string;
- /**
- *
- * @type {Array}
- * @memberof CreateClassificationResponse
- */
- 'selected_examples'?: Array;
-}
-/**
- *
- * @export
- * @interface CreateClassificationResponseSelectedExamplesInner
- */
-export interface CreateClassificationResponseSelectedExamplesInner {
- /**
- *
- * @type {number}
- * @memberof CreateClassificationResponseSelectedExamplesInner
- */
- 'document'?: number;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponseSelectedExamplesInner
- */
- 'text'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponseSelectedExamplesInner
- */
- 'label'?: string;
-}
-/**
- *
- * @export
- * @interface CreateCompletionRequest
- */
-export interface CreateCompletionRequest {
- /**
- * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
- * @type {string}
- * @memberof CreateCompletionRequest
- */
- 'model': string;
- /**
- *
- * @type {CreateCompletionRequestPrompt}
- * @memberof CreateCompletionRequest
- */
- 'prompt'?: CreateCompletionRequestPrompt | null;
- /**
- * The suffix that comes after a completion of inserted text.
- * @type {string}
- * @memberof CreateCompletionRequest
- */
- 'suffix'?: string | null;
- /**
- * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'max_tokens'?: number | null;
- /**
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'temperature'?: number | null;
- /**
- * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'top_p'?: number | null;
- /**
- * How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'n'?: number | null;
- /**
- * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
- * @type {boolean}
- * @memberof CreateCompletionRequest
- */
- 'stream'?: boolean | null;
- /**
- * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'logprobs'?: number | null;
- /**
- * Echo back the prompt in addition to the completion
- * @type {boolean}
- * @memberof CreateCompletionRequest
- */
- 'echo'?: boolean | null;
- /**
- *
- * @type {CreateCompletionRequestStop}
- * @memberof CreateCompletionRequest
- */
- 'stop'?: CreateCompletionRequestStop | null;
- /**
- * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'presence_penalty'?: number | null;
- /**
- * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'frequency_penalty'?: number | null;
- /**
- * Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'best_of'?: number | null;
- /**
- * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
- * @type {object}
- * @memberof CreateCompletionRequest
- */
- 'logit_bias'?: object | null;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateCompletionRequest
- */
- 'user'?: string;
-}
-/**
- * @type CreateCompletionRequestPrompt
- * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
- * @export
- */
-export type CreateCompletionRequestPrompt = Array | Array | Array | string;
-
-/**
- * @type CreateCompletionRequestStop
- * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
- * @export
- */
-export type CreateCompletionRequestStop = Array | string;
-
-/**
- *
- * @export
- * @interface CreateCompletionResponse
- */
-export interface CreateCompletionResponse {
- /**
- *
- * @type {string}
- * @memberof CreateCompletionResponse
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof CreateCompletionResponse
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof CreateCompletionResponse
- */
- 'created': number;
- /**
- *
- * @type {string}
- * @memberof CreateCompletionResponse
- */
- 'model': string;
- /**
- *
- * @type {Array}
- * @memberof CreateCompletionResponse
- */
- 'choices': Array;
- /**
- *
- * @type {CreateCompletionResponseUsage}
- * @memberof CreateCompletionResponse
- */
- 'usage'?: CreateCompletionResponseUsage;
-}
-/**
- *
- * @export
- * @interface CreateCompletionResponseChoicesInner
- */
-export interface CreateCompletionResponseChoicesInner {
- /**
- *
- * @type {string}
- * @memberof CreateCompletionResponseChoicesInner
- */
- 'text'?: string;
- /**
- *
- * @type {number}
- * @memberof CreateCompletionResponseChoicesInner
- */
- 'index'?: number;
- /**
- *
- * @type {CreateCompletionResponseChoicesInnerLogprobs}
- * @memberof CreateCompletionResponseChoicesInner
- */
- 'logprobs'?: CreateCompletionResponseChoicesInnerLogprobs | null;
- /**
- *
- * @type {string}
- * @memberof CreateCompletionResponseChoicesInner
- */
- 'finish_reason'?: string;
-}
-/**
- *
- * @export
- * @interface CreateCompletionResponseChoicesInnerLogprobs
- */
-export interface CreateCompletionResponseChoicesInnerLogprobs {
- /**
- *
- * @type {Array}
- * @memberof CreateCompletionResponseChoicesInnerLogprobs
- */
- 'tokens'?: Array;
- /**
- *
- * @type {Array}
- * @memberof CreateCompletionResponseChoicesInnerLogprobs
- */
- 'token_logprobs'?: Array;
- /**
- *
- * @type {Array}
- * @memberof CreateCompletionResponseChoicesInnerLogprobs
- */
- 'top_logprobs'?: Array;
- /**
- *
- * @type {Array}
- * @memberof CreateCompletionResponseChoicesInnerLogprobs
- */
- 'text_offset'?: Array;
-}
-/**
- *
- * @export
- * @interface CreateCompletionResponseUsage
- */
-export interface CreateCompletionResponseUsage {
- /**
- *
- * @type {number}
- * @memberof CreateCompletionResponseUsage
- */
- 'prompt_tokens': number;
- /**
- *
- * @type {number}
- * @memberof CreateCompletionResponseUsage
- */
- 'completion_tokens': number;
- /**
- *
- * @type {number}
- * @memberof CreateCompletionResponseUsage
- */
- 'total_tokens': number;
-}
-/**
- *
- * @export
- * @interface CreateEditRequest
- */
-export interface CreateEditRequest {
- /**
- * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint.
- * @type {string}
- * @memberof CreateEditRequest
- */
- 'model': string;
- /**
- * The input text to use as a starting point for the edit.
- * @type {string}
- * @memberof CreateEditRequest
- */
- 'input'?: string | null;
- /**
- * The instruction that tells the model how to edit the prompt.
- * @type {string}
- * @memberof CreateEditRequest
- */
- 'instruction': string;
- /**
- * How many edits to generate for the input and instruction.
- * @type {number}
- * @memberof CreateEditRequest
- */
- 'n'?: number | null;
- /**
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
- * @type {number}
- * @memberof CreateEditRequest
- */
- 'temperature'?: number | null;
- /**
- * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
- * @type {number}
- * @memberof CreateEditRequest
- */
- 'top_p'?: number | null;
-}
-/**
- *
- * @export
- * @interface CreateEditResponse
- */
-export interface CreateEditResponse {
- /**
- *
- * @type {string}
- * @memberof CreateEditResponse
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof CreateEditResponse
- */
- 'created': number;
- /**
- *
- * @type {Array}
- * @memberof CreateEditResponse
- */
- 'choices': Array;
- /**
- *
- * @type {CreateCompletionResponseUsage}
- * @memberof CreateEditResponse
- */
- 'usage': CreateCompletionResponseUsage;
-}
-/**
- *
- * @export
- * @interface CreateEmbeddingRequest
- */
-export interface CreateEmbeddingRequest {
- /**
- * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
- * @type {string}
- * @memberof CreateEmbeddingRequest
- */
- 'model': string;
- /**
- *
- * @type {CreateEmbeddingRequestInput}
- * @memberof CreateEmbeddingRequest
- */
- 'input': CreateEmbeddingRequestInput;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateEmbeddingRequest
- */
- 'user'?: string;
-}
-/**
- * @type CreateEmbeddingRequestInput
- * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
- * @export
- */
-export type CreateEmbeddingRequestInput = Array | Array | Array | string;
-
-/**
- *
- * @export
- * @interface CreateEmbeddingResponse
- */
-export interface CreateEmbeddingResponse {
- /**
- *
- * @type {string}
- * @memberof CreateEmbeddingResponse
- */
- 'object': string;
- /**
- *
- * @type {string}
- * @memberof CreateEmbeddingResponse
- */
- 'model': string;
- /**
- *
- * @type {Array}
- * @memberof CreateEmbeddingResponse
- */
- 'data': Array;
- /**
- *
- * @type {CreateEmbeddingResponseUsage}
- * @memberof CreateEmbeddingResponse
- */
- 'usage': CreateEmbeddingResponseUsage;
-}
-/**
- *
- * @export
- * @interface CreateEmbeddingResponseDataInner
- */
-export interface CreateEmbeddingResponseDataInner {
- /**
- *
- * @type {number}
- * @memberof CreateEmbeddingResponseDataInner
- */
- 'index': number;
- /**
- *
- * @type {string}
- * @memberof CreateEmbeddingResponseDataInner
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof CreateEmbeddingResponseDataInner
- */
- 'embedding': Array;
-}
-/**
- *
- * @export
- * @interface CreateEmbeddingResponseUsage
- */
-export interface CreateEmbeddingResponseUsage {
- /**
- *
- * @type {number}
- * @memberof CreateEmbeddingResponseUsage
- */
- 'prompt_tokens': number;
- /**
- *
- * @type {number}
- * @memberof CreateEmbeddingResponseUsage
- */
- 'total_tokens': number;
-}
-/**
- *
- * @export
- * @interface CreateFineTuneRequest
- */
-export interface CreateFineTuneRequest {
- /**
- * The ID of an uploaded file that contains training data. See [upload file](/docs/api-reference/files/upload) for how to upload a file. Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
- * @type {string}
- * @memberof CreateFineTuneRequest
- */
- 'training_file': string;
- /**
- * The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). Your train and validation data should be mutually exclusive. Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
- * @type {string}
- * @memberof CreateFineTuneRequest
- */
- 'validation_file'?: string | null;
- /**
- * The name of the base model to fine-tune. You can select one of \"ada\", \"babbage\", \"curie\", \"davinci\", or a fine-tuned model created after 2022-04-21. To learn more about these models, see the [Models](https://platform.openai.com/docs/models) documentation.
- * @type {string}
- * @memberof CreateFineTuneRequest
- */
- 'model'?: string | null;
- /**
- * The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
- * @type {number}
- * @memberof CreateFineTuneRequest
- */
- 'n_epochs'?: number | null;
- /**
- * The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass. By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set, capped at 256 - in general, we\'ve found that larger batch sizes tend to work better for larger datasets.
- * @type {number}
- * @memberof CreateFineTuneRequest
- */
- 'batch_size'?: number | null;
- /**
- * The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value. By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final `batch_size` (larger learning rates tend to perform better with larger batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results.
- * @type {number}
- * @memberof CreateFineTuneRequest
- */
- 'learning_rate_multiplier'?: number | null;
- /**
- * The weight to use for loss on the prompt tokens. This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), and can add a stabilizing effect to training when completions are short. If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt.
- * @type {number}
- * @memberof CreateFineTuneRequest
- */
- 'prompt_loss_weight'?: number | null;
- /**
- * If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch. These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). In order to compute classification metrics, you must provide a `validation_file`. Additionally, you must specify `classification_n_classes` for multiclass classification or `classification_positive_class` for binary classification.
- * @type {boolean}
- * @memberof CreateFineTuneRequest
- */
- 'compute_classification_metrics'?: boolean | null;
- /**
- * The number of classes in a classification task. This parameter is required for multiclass classification.
- * @type {number}
- * @memberof CreateFineTuneRequest
- */
- 'classification_n_classes'?: number | null;
- /**
- * The positive class in binary classification. This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification.
- * @type {string}
- * @memberof CreateFineTuneRequest
- */
- 'classification_positive_class'?: string | null;
- /**
- * If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score is a generalization of F-1 score. This is only used for binary classification. With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall.
- * @type {Array}
- * @memberof CreateFineTuneRequest
- */
- 'classification_betas'?: Array | null;
- /**
- * A string of up to 40 characters that will be added to your fine-tuned model name. For example, a `suffix` of \"custom-model-name\" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
- * @type {string}
- * @memberof CreateFineTuneRequest
- */
- 'suffix'?: string | null;
-}
-/**
- *
- * @export
- * @interface CreateImageRequest
- */
-export interface CreateImageRequest {
- /**
- * A text description of the desired image(s). The maximum length is 1000 characters.
- * @type {string}
- * @memberof CreateImageRequest
- */
- 'prompt': string;
- /**
- * The number of images to generate. Must be between 1 and 10.
- * @type {number}
- * @memberof CreateImageRequest
- */
- 'n'?: number | null;
- /**
- * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
- * @type {string}
- * @memberof CreateImageRequest
- */
- 'size'?: CreateImageRequestSizeEnum;
- /**
- * The format in which the generated images are returned. Must be one of `url` or `b64_json`.
- * @type {string}
- * @memberof CreateImageRequest
- */
- 'response_format'?: CreateImageRequestResponseFormatEnum;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateImageRequest
- */
- 'user'?: string;
-}
-
-export const CreateImageRequestSizeEnum = {
- _256x256: '256x256',
- _512x512: '512x512',
- _1024x1024: '1024x1024'
-} as const;
-
-export type CreateImageRequestSizeEnum = typeof CreateImageRequestSizeEnum[keyof typeof CreateImageRequestSizeEnum];
-export const CreateImageRequestResponseFormatEnum = {
- Url: 'url',
- B64Json: 'b64_json'
-} as const;
-
-export type CreateImageRequestResponseFormatEnum = typeof CreateImageRequestResponseFormatEnum[keyof typeof CreateImageRequestResponseFormatEnum];
-
-/**
- *
- * @export
- * @interface CreateModerationRequest
- */
-export interface CreateModerationRequest {
- /**
- *
- * @type {CreateModerationRequestInput}
- * @memberof CreateModerationRequest
- */
- 'input': CreateModerationRequestInput;
- /**
- * Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
- * @type {string}
- * @memberof CreateModerationRequest
- */
- 'model'?: string;
-}
-/**
- * @type CreateModerationRequestInput
- * The input text to classify
- * @export
- */
-export type CreateModerationRequestInput = Array | string;
-
-/**
- *
- * @export
- * @interface CreateModerationResponse
- */
-export interface CreateModerationResponse {
- /**
- *
- * @type {string}
- * @memberof CreateModerationResponse
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof CreateModerationResponse
- */
- 'model': string;
- /**
- *
- * @type {Array}
- * @memberof CreateModerationResponse
- */
- 'results': Array;
-}
-/**
- *
- * @export
- * @interface CreateModerationResponseResultsInner
- */
-export interface CreateModerationResponseResultsInner {
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInner
- */
- 'flagged': boolean;
- /**
- *
- * @type {CreateModerationResponseResultsInnerCategories}
- * @memberof CreateModerationResponseResultsInner
- */
- 'categories': CreateModerationResponseResultsInnerCategories;
- /**
- *
- * @type {CreateModerationResponseResultsInnerCategoryScores}
- * @memberof CreateModerationResponseResultsInner
- */
- 'category_scores': CreateModerationResponseResultsInnerCategoryScores;
-}
-/**
- *
- * @export
- * @interface CreateModerationResponseResultsInnerCategories
- */
-export interface CreateModerationResponseResultsInnerCategories {
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'hate': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'hate/threatening': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'self-harm': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'sexual': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'sexual/minors': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'violence': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'violence/graphic': boolean;
-}
-/**
- *
- * @export
- * @interface CreateModerationResponseResultsInnerCategoryScores
- */
-export interface CreateModerationResponseResultsInnerCategoryScores {
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'hate': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'hate/threatening': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'self-harm': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'sexual': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'sexual/minors': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'violence': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'violence/graphic': number;
-}
-/**
- *
- * @export
- * @interface CreateSearchRequest
- */
-export interface CreateSearchRequest {
- /**
- * Query to search against the documents.
- * @type {string}
- * @memberof CreateSearchRequest
- */
- 'query': string;
- /**
- * Up to 200 documents to search over, provided as a list of strings. The maximum document length (in tokens) is 2034 minus the number of tokens in the query. You should specify either `documents` or a `file`, but not both.
- * @type {Array}
- * @memberof CreateSearchRequest
- */
- 'documents'?: Array | null;
- /**
- * The ID of an uploaded file that contains documents to search over. You should specify either `documents` or a `file`, but not both.
- * @type {string}
- * @memberof CreateSearchRequest
- */
- 'file'?: string | null;
- /**
- * The maximum number of documents to be re-ranked and returned by search. This flag only takes effect when `file` is set.
- * @type {number}
- * @memberof CreateSearchRequest
- */
- 'max_rerank'?: number | null;
- /**
- * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
- * @type {boolean}
- * @memberof CreateSearchRequest
- */
- 'return_metadata'?: boolean | null;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateSearchRequest
- */
- 'user'?: string;
-}
-/**
- *
- * @export
- * @interface CreateSearchResponse
- */
-export interface CreateSearchResponse {
- /**
- *
- * @type {string}
- * @memberof CreateSearchResponse
- */
- 'object'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateSearchResponse
- */
- 'model'?: string;
- /**
- *
- * @type {Array}
- * @memberof CreateSearchResponse
- */
- 'data'?: Array;
-}
-/**
- *
- * @export
- * @interface CreateSearchResponseDataInner
- */
-export interface CreateSearchResponseDataInner {
- /**
- *
- * @type {string}
- * @memberof CreateSearchResponseDataInner
- */
- 'object'?: string;
- /**
- *
- * @type {number}
- * @memberof CreateSearchResponseDataInner
- */
- 'document'?: number;
- /**
- *
- * @type {number}
- * @memberof CreateSearchResponseDataInner
- */
- 'score'?: number;
-}
-/**
- *
- * @export
- * @interface CreateTranscriptionResponse
- */
-export interface CreateTranscriptionResponse {
- /**
- *
- * @type {string}
- * @memberof CreateTranscriptionResponse
- */
- 'text': string;
-}
-/**
- *
- * @export
- * @interface CreateTranslationResponse
- */
-export interface CreateTranslationResponse {
- /**
- *
- * @type {string}
- * @memberof CreateTranslationResponse
- */
- 'text': string;
-}
-/**
- *
- * @export
- * @interface DeleteFileResponse
- */
-export interface DeleteFileResponse {
- /**
- *
- * @type {string}
- * @memberof DeleteFileResponse
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof DeleteFileResponse
- */
- 'object': string;
- /**
- *
- * @type {boolean}
- * @memberof DeleteFileResponse
- */
- 'deleted': boolean;
-}
-/**
- *
- * @export
- * @interface DeleteModelResponse
- */
-export interface DeleteModelResponse {
- /**
- *
- * @type {string}
- * @memberof DeleteModelResponse
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof DeleteModelResponse
- */
- 'object': string;
- /**
- *
- * @type {boolean}
- * @memberof DeleteModelResponse
- */
- 'deleted': boolean;
-}
-/**
- *
- * @export
- * @interface Engine
- */
-export interface Engine {
- /**
- *
- * @type {string}
- * @memberof Engine
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof Engine
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof Engine
- */
- 'created': number | null;
- /**
- *
- * @type {boolean}
- * @memberof Engine
- */
- 'ready': boolean;
-}
-/**
- *
- * @export
- * @interface ErrorResponse
- */
-export interface ErrorResponse {
- /**
- *
- * @type {Error}
- * @memberof ErrorResponse
- */
- 'error': Error;
-}
-/**
- *
- * @export
- * @interface FineTune
- */
-export interface FineTune {
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof FineTune
- */
- 'created_at': number;
- /**
- *
- * @type {number}
- * @memberof FineTune
- */
- 'updated_at': number;
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'model': string;
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'fine_tuned_model': string | null;
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'organization_id': string;
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'status': string;
- /**
- *
- * @type {object}
- * @memberof FineTune
- */
- 'hyperparams': object;
- /**
- *
- * @type {Array}
- * @memberof FineTune
- */
- 'training_files': Array;
- /**
- *
- * @type {Array}
- * @memberof FineTune
- */
- 'validation_files': Array;
- /**
- *
- * @type {Array}
- * @memberof FineTune
- */
- 'result_files': Array;
- /**
- *
- * @type {Array}
- * @memberof FineTune
- */
- 'events'?: Array;
-}
-/**
- *
- * @export
- * @interface FineTuneEvent
- */
-export interface FineTuneEvent {
- /**
- *
- * @type {string}
- * @memberof FineTuneEvent
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof FineTuneEvent
- */
- 'created_at': number;
- /**
- *
- * @type {string}
- * @memberof FineTuneEvent
- */
- 'level': string;
- /**
- *
- * @type {string}
- * @memberof FineTuneEvent
- */
- 'message': string;
-}
-/**
- *
- * @export
- * @interface ImagesResponse
- */
-export interface ImagesResponse {
- /**
- *
- * @type {number}
- * @memberof ImagesResponse
- */
- 'created': number;
- /**
- *
- * @type {Array}
- * @memberof ImagesResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface ImagesResponseDataInner
- */
-export interface ImagesResponseDataInner {
- /**
- *
- * @type {string}
- * @memberof ImagesResponseDataInner
- */
- 'url'?: string;
- /**
- *
- * @type {string}
- * @memberof ImagesResponseDataInner
- */
- 'b64_json'?: string;
-}
-/**
- *
- * @export
- * @interface ListEnginesResponse
- */
-export interface ListEnginesResponse {
- /**
- *
- * @type {string}
- * @memberof ListEnginesResponse
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof ListEnginesResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface ListFilesResponse
- */
-export interface ListFilesResponse {
- /**
- *
- * @type {string}
- * @memberof ListFilesResponse
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof ListFilesResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface ListFineTuneEventsResponse
- */
-export interface ListFineTuneEventsResponse {
- /**
- *
- * @type {string}
- * @memberof ListFineTuneEventsResponse
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof ListFineTuneEventsResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface ListFineTunesResponse
- */
-export interface ListFineTunesResponse {
- /**
- *
- * @type {string}
- * @memberof ListFineTunesResponse
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof ListFineTunesResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface ListModelsResponse
- */
-export interface ListModelsResponse {
- /**
- *
- * @type {string}
- * @memberof ListModelsResponse
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof ListModelsResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface Model
- */
-export interface Model {
- /**
- *
- * @type {string}
- * @memberof Model
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof Model
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof Model
- */
- 'created': number;
- /**
- *
- * @type {string}
- * @memberof Model
- */
- 'owned_by': string;
-}
-/**
- *
- * @export
- * @interface ModelError
- */
-export interface ModelError {
- /**
- *
- * @type {string}
- * @memberof ModelError
- */
- 'type': string;
- /**
- *
- * @type {string}
- * @memberof ModelError
- */
- 'message': string;
- /**
- *
- * @type {string}
- * @memberof ModelError
- */
- 'param': string | null;
- /**
- *
- * @type {string}
- * @memberof ModelError
- */
- 'code': string | null;
-}
-/**
- *
- * @export
- * @interface OpenAIFile
- */
-export interface OpenAIFile {
- /**
- *
- * @type {string}
- * @memberof OpenAIFile
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof OpenAIFile
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof OpenAIFile
- */
- 'bytes': number;
- /**
- *
- * @type {number}
- * @memberof OpenAIFile
- */
- 'created_at': number;
- /**
- *
- * @type {string}
- * @memberof OpenAIFile
- */
- 'filename': string;
- /**
- *
- * @type {string}
- * @memberof OpenAIFile
- */
- 'purpose': string;
- /**
- *
- * @type {string}
- * @memberof OpenAIFile
- */
- 'status'?: string;
- /**
- *
- * @type {object}
- * @memberof OpenAIFile
- */
- 'status_details'?: object | null;
-}
-
-/**
- * OpenAIApi - axios parameter creator
- * @export
- */
-export const OpenAIApiAxiosParamCreator = function (configuration?: Configuration) {
- return {
- /**
- *
- * @summary Immediately cancel a fine-tune job.
- * @param {string} fineTuneId The ID of the fine-tune job to cancel
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- cancelFineTune: async (fineTuneId: string, options: AxiosRequestConfig = {}): Promise => {
- // verify required parameter 'fineTuneId' is not null or undefined
- assertParamExists('cancelFineTune', 'fineTuneId', fineTuneId)
- const localVarPath = `/fine-tunes/{fine_tune_id}/cancel`
- .replace(`{${"fine_tune_id"}}`, encodeURIComponent(String(fineTuneId)));
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
- * @param {CreateAnswerRequest} createAnswerRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- createAnswer: async (createAnswerRequest: CreateAnswerRequest, options: AxiosRequestConfig = {}): Promise => {
- // verify required parameter 'createAnswerRequest' is not null or undefined
- assertParamExists('createAnswer', 'createAnswerRequest', createAnswerRequest)
- const localVarPath = `/answers`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- localVarHeaderParameter['Content-Type'] = 'application/json';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = serializeDataIfNeeded(createAnswerRequest, localVarRequestOptions, configuration)
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Creates a model response for the given chat conversation.
- * @param {CreateChatCompletionRequest} createChatCompletionRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createChatCompletion: async (createChatCompletionRequest: CreateChatCompletionRequest, options: AxiosRequestConfig = {}): Promise => {
- // verify required parameter 'createChatCompletionRequest' is not null or undefined
- assertParamExists('createChatCompletion', 'createChatCompletionRequest', createChatCompletionRequest)
- const localVarPath = `/chat/completions`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- localVarHeaderParameter['Content-Type'] = 'application/json';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = serializeDataIfNeeded(createChatCompletionRequest, localVarRequestOptions, configuration)
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
- * @param {CreateClassificationRequest} createClassificationRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
-        createClassification: async (createClassificationRequest: CreateClassificationRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'createClassificationRequest' is not null or undefined
- assertParamExists('createClassification', 'createClassificationRequest', createClassificationRequest)
- const localVarPath = `/classifications`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- localVarHeaderParameter['Content-Type'] = 'application/json';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = serializeDataIfNeeded(createClassificationRequest, localVarRequestOptions, configuration)
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Creates a completion for the provided prompt and parameters.
- * @param {CreateCompletionRequest} createCompletionRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        createCompletion: async (createCompletionRequest: CreateCompletionRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'createCompletionRequest' is not null or undefined
- assertParamExists('createCompletion', 'createCompletionRequest', createCompletionRequest)
- const localVarPath = `/completions`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- localVarHeaderParameter['Content-Type'] = 'application/json';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = serializeDataIfNeeded(createCompletionRequest, localVarRequestOptions, configuration)
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Creates a new edit for the provided input, instruction, and parameters.
- * @param {CreateEditRequest} createEditRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        createEdit: async (createEditRequest: CreateEditRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'createEditRequest' is not null or undefined
- assertParamExists('createEdit', 'createEditRequest', createEditRequest)
- const localVarPath = `/edits`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- localVarHeaderParameter['Content-Type'] = 'application/json';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = serializeDataIfNeeded(createEditRequest, localVarRequestOptions, configuration)
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Creates an embedding vector representing the input text.
- * @param {CreateEmbeddingRequest} createEmbeddingRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        createEmbedding: async (createEmbeddingRequest: CreateEmbeddingRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'createEmbeddingRequest' is not null or undefined
- assertParamExists('createEmbedding', 'createEmbeddingRequest', createEmbeddingRequest)
- const localVarPath = `/embeddings`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- localVarHeaderParameter['Content-Type'] = 'application/json';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = serializeDataIfNeeded(createEmbeddingRequest, localVarRequestOptions, configuration)
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
- * @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the `purpose` is set to \\\"fine-tune\\\", each line is a JSON record with \\\"prompt\\\" and \\\"completion\\\" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
- * @param {string} purpose The intended purpose of the uploaded documents. Use \\\"fine-tune\\\" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        createFile: async (file: File, purpose: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'file' is not null or undefined
- assertParamExists('createFile', 'file', file)
- // verify required parameter 'purpose' is not null or undefined
- assertParamExists('createFile', 'purpose', purpose)
- const localVarPath = `/files`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
- const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
-
-
- if (file !== undefined) {
- localVarFormParams.append('file', file as any);
- }
-
- if (purpose !== undefined) {
- localVarFormParams.append('purpose', purpose as any);
- }
-
-
- localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = localVarFormParams;
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
- * @param {CreateFineTuneRequest} createFineTuneRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        createFineTune: async (createFineTuneRequest: CreateFineTuneRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'createFineTuneRequest' is not null or undefined
- assertParamExists('createFineTune', 'createFineTuneRequest', createFineTuneRequest)
- const localVarPath = `/fine-tunes`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- localVarHeaderParameter['Content-Type'] = 'application/json';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = serializeDataIfNeeded(createFineTuneRequest, localVarRequestOptions, configuration)
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Creates an image given a prompt.
- * @param {CreateImageRequest} createImageRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        createImage: async (createImageRequest: CreateImageRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'createImageRequest' is not null or undefined
- assertParamExists('createImage', 'createImageRequest', createImageRequest)
- const localVarPath = `/images/generations`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- localVarHeaderParameter['Content-Type'] = 'application/json';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = serializeDataIfNeeded(createImageRequest, localVarRequestOptions, configuration)
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Creates an edited or extended image given an original image and a prompt.
- * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
- * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
- * @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
- * @param {number} [n] The number of images to generate. Must be between 1 and 10.
- * @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
- * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
- * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        createImageEdit: async (image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'image' is not null or undefined
- assertParamExists('createImageEdit', 'image', image)
- // verify required parameter 'prompt' is not null or undefined
- assertParamExists('createImageEdit', 'prompt', prompt)
- const localVarPath = `/images/edits`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
- const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
-
-
- if (image !== undefined) {
- localVarFormParams.append('image', image as any);
- }
-
- if (mask !== undefined) {
- localVarFormParams.append('mask', mask as any);
- }
-
- if (prompt !== undefined) {
- localVarFormParams.append('prompt', prompt as any);
- }
-
- if (n !== undefined) {
- localVarFormParams.append('n', n as any);
- }
-
- if (size !== undefined) {
- localVarFormParams.append('size', size as any);
- }
-
- if (responseFormat !== undefined) {
- localVarFormParams.append('response_format', responseFormat as any);
- }
-
- if (user !== undefined) {
- localVarFormParams.append('user', user as any);
- }
-
-
- localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = localVarFormParams;
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Creates a variation of a given image.
- * @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
- * @param {number} [n] The number of images to generate. Must be between 1 and 10.
- * @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
- * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
- * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        createImageVariation: async (image: File, n?: number, size?: string, responseFormat?: string, user?: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'image' is not null or undefined
- assertParamExists('createImageVariation', 'image', image)
- const localVarPath = `/images/variations`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
- const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
-
-
- if (image !== undefined) {
- localVarFormParams.append('image', image as any);
- }
-
- if (n !== undefined) {
- localVarFormParams.append('n', n as any);
- }
-
- if (size !== undefined) {
- localVarFormParams.append('size', size as any);
- }
-
- if (responseFormat !== undefined) {
- localVarFormParams.append('response_format', responseFormat as any);
- }
-
- if (user !== undefined) {
- localVarFormParams.append('user', user as any);
- }
-
-
- localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = localVarFormParams;
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Classifies if text violates OpenAI\'s Content Policy
- * @param {CreateModerationRequest} createModerationRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        createModeration: async (createModerationRequest: CreateModerationRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'createModerationRequest' is not null or undefined
- assertParamExists('createModeration', 'createModerationRequest', createModerationRequest)
- const localVarPath = `/moderations`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- localVarHeaderParameter['Content-Type'] = 'application/json';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = serializeDataIfNeeded(createModerationRequest, localVarRequestOptions, configuration)
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
- * @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
- * @param {CreateSearchRequest} createSearchRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
-        createSearch: async (engineId: string, createSearchRequest: CreateSearchRequest, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'engineId' is not null or undefined
- assertParamExists('createSearch', 'engineId', engineId)
- // verify required parameter 'createSearchRequest' is not null or undefined
- assertParamExists('createSearch', 'createSearchRequest', createSearchRequest)
- const localVarPath = `/engines/{engine_id}/search`
- .replace(`{${"engine_id"}}`, encodeURIComponent(String(engineId)));
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- localVarHeaderParameter['Content-Type'] = 'application/json';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = serializeDataIfNeeded(createSearchRequest, localVarRequestOptions, configuration)
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Transcribes audio into the input language.
- * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
- * @param {string} model ID of the model to use. Only `whisper-1` is currently available.
- * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
- * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
- * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
- * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        createTranscription: async (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'file' is not null or undefined
- assertParamExists('createTranscription', 'file', file)
- // verify required parameter 'model' is not null or undefined
- assertParamExists('createTranscription', 'model', model)
- const localVarPath = `/audio/transcriptions`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
- const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
-
-
- if (file !== undefined) {
- localVarFormParams.append('file', file as any);
- }
-
- if (model !== undefined) {
- localVarFormParams.append('model', model as any);
- }
-
- if (prompt !== undefined) {
- localVarFormParams.append('prompt', prompt as any);
- }
-
- if (responseFormat !== undefined) {
- localVarFormParams.append('response_format', responseFormat as any);
- }
-
- if (temperature !== undefined) {
- localVarFormParams.append('temperature', temperature as any);
- }
-
- if (language !== undefined) {
- localVarFormParams.append('language', language as any);
- }
-
-
- localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = localVarFormParams;
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Translates audio into into English.
- * @param {File} file The audio file object (not file name) translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
- * @param {string} model ID of the model to use. Only `whisper-1` is currently available.
- * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
- * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
- * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        createTranslation: async (file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'file' is not null or undefined
- assertParamExists('createTranslation', 'file', file)
- // verify required parameter 'model' is not null or undefined
- assertParamExists('createTranslation', 'model', model)
- const localVarPath = `/audio/translations`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
- const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
-
-
- if (file !== undefined) {
- localVarFormParams.append('file', file as any);
- }
-
- if (model !== undefined) {
- localVarFormParams.append('model', model as any);
- }
-
- if (prompt !== undefined) {
- localVarFormParams.append('prompt', prompt as any);
- }
-
- if (responseFormat !== undefined) {
- localVarFormParams.append('response_format', responseFormat as any);
- }
-
- if (temperature !== undefined) {
- localVarFormParams.append('temperature', temperature as any);
- }
-
-
- localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...localVarFormParams.getHeaders(), ...headersFromBaseOptions, ...options.headers};
- localVarRequestOptions.data = localVarFormParams;
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Delete a file.
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        deleteFile: async (fileId: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'fileId' is not null or undefined
- assertParamExists('deleteFile', 'fileId', fileId)
- const localVarPath = `/files/{file_id}`
- .replace(`{${"file_id"}}`, encodeURIComponent(String(fileId)));
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'DELETE', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
- * @param {string} model The model to delete
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        deleteModel: async (model: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'model' is not null or undefined
- assertParamExists('deleteModel', 'model', model)
- const localVarPath = `/models/{model}`
- .replace(`{${"model"}}`, encodeURIComponent(String(model)));
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'DELETE', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Returns the contents of the specified file
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
-        downloadFile: async (fileId: string, options: AxiosRequestConfig = {}): Promise<RequestArgs> => {
- // verify required parameter 'fileId' is not null or undefined
- assertParamExists('downloadFile', 'fileId', fileId)
- const localVarPath = `/files/{file_id}/content`
- .replace(`{${"file_id"}}`, encodeURIComponent(String(fileId)));
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- listEngines: async (options: AxiosRequestConfig = {}): Promise => {
- const localVarPath = `/engines`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Returns a list of files that belong to the user\'s organization.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- listFiles: async (options: AxiosRequestConfig = {}): Promise => {
- const localVarPath = `/files`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Get fine-grained status updates for a fine-tune job.
- * @param {string} fineTuneId The ID of the fine-tune job to get events for.
- * @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a `data: [DONE]` message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- listFineTuneEvents: async (fineTuneId: string, stream?: boolean, options: AxiosRequestConfig = {}): Promise => {
- // verify required parameter 'fineTuneId' is not null or undefined
- assertParamExists('listFineTuneEvents', 'fineTuneId', fineTuneId)
- const localVarPath = `/fine-tunes/{fine_tune_id}/events`
- .replace(`{${"fine_tune_id"}}`, encodeURIComponent(String(fineTuneId)));
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
- if (stream !== undefined) {
- localVarQueryParameter['stream'] = stream;
- }
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary List your organization\'s fine-tuning jobs
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- listFineTunes: async (options: AxiosRequestConfig = {}): Promise => {
- const localVarPath = `/fine-tunes`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- listModels: async (options: AxiosRequestConfig = {}): Promise => {
- const localVarPath = `/models`;
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
- * @param {string} engineId The ID of the engine to use for this request
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- retrieveEngine: async (engineId: string, options: AxiosRequestConfig = {}): Promise => {
- // verify required parameter 'engineId' is not null or undefined
- assertParamExists('retrieveEngine', 'engineId', engineId)
- const localVarPath = `/engines/{engine_id}`
- .replace(`{${"engine_id"}}`, encodeURIComponent(String(engineId)));
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Returns information about a specific file.
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- retrieveFile: async (fileId: string, options: AxiosRequestConfig = {}): Promise => {
- // verify required parameter 'fileId' is not null or undefined
- assertParamExists('retrieveFile', 'fileId', fileId)
- const localVarPath = `/files/{file_id}`
- .replace(`{${"file_id"}}`, encodeURIComponent(String(fileId)));
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
- * @param {string} fineTuneId The ID of the fine-tune job
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- retrieveFineTune: async (fineTuneId: string, options: AxiosRequestConfig = {}): Promise => {
- // verify required parameter 'fineTuneId' is not null or undefined
- assertParamExists('retrieveFineTune', 'fineTuneId', fineTuneId)
- const localVarPath = `/fine-tunes/{fine_tune_id}`
- .replace(`{${"fine_tune_id"}}`, encodeURIComponent(String(fineTuneId)));
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- /**
- *
- * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
- * @param {string} model The ID of the model to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- retrieveModel: async (model: string, options: AxiosRequestConfig = {}): Promise => {
- // verify required parameter 'model' is not null or undefined
- assertParamExists('retrieveModel', 'model', model)
- const localVarPath = `/models/{model}`
- .replace(`{${"model"}}`, encodeURIComponent(String(model)));
- // use dummy base URL string because the URL constructor only accepts absolute URLs.
- const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
- let baseOptions;
- if (configuration) {
- baseOptions = configuration.baseOptions;
- }
-
- const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
- const localVarHeaderParameter = {} as any;
- const localVarQueryParameter = {} as any;
-
-
-
- setSearchParams(localVarUrlObj, localVarQueryParameter);
- let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
- localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
-
- return {
- url: toPathString(localVarUrlObj),
- options: localVarRequestOptions,
- };
- },
- }
-};
-
-/**
- * OpenAIApi - functional programming interface
- * @export
- */
-export const OpenAIApiFp = function(configuration?: Configuration) {
- const localVarAxiosParamCreator = OpenAIApiAxiosParamCreator(configuration)
- return {
- /**
- *
- * @summary Immediately cancel a fine-tune job.
- * @param {string} fineTuneId The ID of the fine-tune job to cancel
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async cancelFineTune(fineTuneId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.cancelFineTune(fineTuneId, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
- * @param {CreateAnswerRequest} createAnswerRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- async createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createAnswer(createAnswerRequest, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Creates a model response for the given chat conversation.
- * @param {CreateChatCompletionRequest} createChatCompletionRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createChatCompletion(createChatCompletionRequest, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
- * @param {CreateClassificationRequest} createClassificationRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- async createClassification(createClassificationRequest: CreateClassificationRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createClassification(createClassificationRequest, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Creates a completion for the provided prompt and parameters.
- * @param {CreateCompletionRequest} createCompletionRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createCompletion(createCompletionRequest, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Creates a new edit for the provided input, instruction, and parameters.
- * @param {CreateEditRequest} createEditRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createEdit(createEditRequest: CreateEditRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createEdit(createEditRequest, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Creates an embedding vector representing the input text.
- * @param {CreateEmbeddingRequest} createEmbeddingRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createEmbedding(createEmbeddingRequest, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
- * @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the `purpose` is set to \\\"fine-tune\\\", each line is a JSON record with \\\"prompt\\\" and \\\"completion\\\" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
- * @param {string} purpose The intended purpose of the uploaded documents. Use \\\"fine-tune\\\" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createFile(file: File, purpose: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createFile(file, purpose, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
- * @param {CreateFineTuneRequest} createFineTuneRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createFineTune(createFineTuneRequest: CreateFineTuneRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createFineTune(createFineTuneRequest, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Creates an image given a prompt.
- * @param {CreateImageRequest} createImageRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createImage(createImageRequest: CreateImageRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createImage(createImageRequest, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Creates an edited or extended image given an original image and a prompt.
- * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
- * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
- * @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
- * @param {number} [n] The number of images to generate. Must be between 1 and 10.
- * @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
- * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
- * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createImageEdit(image, prompt, mask, n, size, responseFormat, user, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Creates a variation of a given image.
- * @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
- * @param {number} [n] The number of images to generate. Must be between 1 and 10.
- * @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
- * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
- * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createImageVariation(image: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createImageVariation(image, n, size, responseFormat, user, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Classifies if text violates OpenAI\'s Content Policy
- * @param {CreateModerationRequest} createModerationRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createModeration(createModerationRequest: CreateModerationRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createModeration(createModerationRequest, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
- * @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
- * @param {CreateSearchRequest} createSearchRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- async createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createSearch(engineId, createSearchRequest, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Transcribes audio into the input language.
- * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
- * @param {string} model ID of the model to use. Only `whisper-1` is currently available.
- * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
- * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
- * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
- * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createTranscription(file, model, prompt, responseFormat, temperature, language, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Translates audio into into English.
- * @param {File} file The audio file object (not file name) translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
- * @param {string} model ID of the model to use. Only `whisper-1` is currently available.
- * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
- * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
- * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.createTranslation(file, model, prompt, responseFormat, temperature, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Delete a file.
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async deleteFile(fileId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.deleteFile(fileId, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
- * @param {string} model The model to delete
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async deleteModel(model: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.deleteModel(model, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Returns the contents of the specified file
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async downloadFile(fileId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.downloadFile(fileId, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- async listEngines(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.listEngines(options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Returns a list of files that belong to the user\'s organization.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async listFiles(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.listFiles(options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Get fine-grained status updates for a fine-tune job.
- * @param {string} fineTuneId The ID of the fine-tune job to get events for.
- * @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a `data: [DONE]` message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async listFineTuneEvents(fineTuneId: string, stream?: boolean, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.listFineTuneEvents(fineTuneId, stream, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary List your organization\'s fine-tuning jobs
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async listFineTunes(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.listFineTunes(options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async listModels(options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.listModels(options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
- * @param {string} engineId The ID of the engine to use for this request
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- async retrieveEngine(engineId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.retrieveEngine(engineId, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Returns information about a specific file.
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async retrieveFile(fileId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.retrieveFile(fileId, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
- * @param {string} fineTuneId The ID of the fine-tune job
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async retrieveFineTune(fineTuneId: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.retrieveFineTune(fineTuneId, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- /**
- *
- * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
- * @param {string} model The ID of the model to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- async retrieveModel(model: string, options?: AxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
- const localVarAxiosArgs = await localVarAxiosParamCreator.retrieveModel(model, options);
- return createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration);
- },
- }
-};
-
-/**
- * OpenAIApi - factory interface
- * @export
- */
-export const OpenAIApiFactory = function (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) {
- const localVarFp = OpenAIApiFp(configuration)
- return {
- /**
- *
- * @summary Immediately cancel a fine-tune job.
- * @param {string} fineTuneId The ID of the fine-tune job to cancel
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- cancelFineTune(fineTuneId: string, options?: any): AxiosPromise {
- return localVarFp.cancelFineTune(fineTuneId, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
- * @param {CreateAnswerRequest} createAnswerRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- createAnswer(createAnswerRequest: CreateAnswerRequest, options?: any): AxiosPromise {
- return localVarFp.createAnswer(createAnswerRequest, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Creates a model response for the given chat conversation.
- * @param {CreateChatCompletionRequest} createChatCompletionRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: any): AxiosPromise {
- return localVarFp.createChatCompletion(createChatCompletionRequest, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
- * @param {CreateClassificationRequest} createClassificationRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- createClassification(createClassificationRequest: CreateClassificationRequest, options?: any): AxiosPromise {
- return localVarFp.createClassification(createClassificationRequest, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Creates a completion for the provided prompt and parameters.
- * @param {CreateCompletionRequest} createCompletionRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createCompletion(createCompletionRequest: CreateCompletionRequest, options?: any): AxiosPromise {
- return localVarFp.createCompletion(createCompletionRequest, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Creates a new edit for the provided input, instruction, and parameters.
- * @param {CreateEditRequest} createEditRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createEdit(createEditRequest: CreateEditRequest, options?: any): AxiosPromise {
- return localVarFp.createEdit(createEditRequest, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Creates an embedding vector representing the input text.
- * @param {CreateEmbeddingRequest} createEmbeddingRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: any): AxiosPromise {
- return localVarFp.createEmbedding(createEmbeddingRequest, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
- * @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the `purpose` is set to \\\"fine-tune\\\", each line is a JSON record with \\\"prompt\\\" and \\\"completion\\\" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
- * @param {string} purpose The intended purpose of the uploaded documents. Use \\\"fine-tune\\\" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createFile(file: File, purpose: string, options?: any): AxiosPromise {
- return localVarFp.createFile(file, purpose, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
- * @param {CreateFineTuneRequest} createFineTuneRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createFineTune(createFineTuneRequest: CreateFineTuneRequest, options?: any): AxiosPromise {
- return localVarFp.createFineTune(createFineTuneRequest, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Creates an image given a prompt.
- * @param {CreateImageRequest} createImageRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createImage(createImageRequest: CreateImageRequest, options?: any): AxiosPromise {
- return localVarFp.createImage(createImageRequest, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Creates an edited or extended image given an original image and a prompt.
- * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
- * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
- * @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
- * @param {number} [n] The number of images to generate. Must be between 1 and 10.
- * @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
- * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
- * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: any): AxiosPromise {
- return localVarFp.createImageEdit(image, prompt, mask, n, size, responseFormat, user, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Creates a variation of a given image.
- * @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
- * @param {number} [n] The number of images to generate. Must be between 1 and 10.
- * @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
- * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
- * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createImageVariation(image: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: any): AxiosPromise {
- return localVarFp.createImageVariation(image, n, size, responseFormat, user, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Classifies if text violates OpenAI\'s Content Policy
- * @param {CreateModerationRequest} createModerationRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createModeration(createModerationRequest: CreateModerationRequest, options?: any): AxiosPromise {
- return localVarFp.createModeration(createModerationRequest, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
- * @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
- * @param {CreateSearchRequest} createSearchRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: any): AxiosPromise {
- return localVarFp.createSearch(engineId, createSearchRequest, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Transcribes audio into the input language.
- * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
- * @param {string} model ID of the model to use. Only `whisper-1` is currently available.
- * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
- * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
- * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
- * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: any): AxiosPromise {
- return localVarFp.createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Translates audio into into English.
- * @param {File} file The audio file object (not file name) translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
- * @param {string} model ID of the model to use. Only `whisper-1` is currently available.
- * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
- * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
- * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: any): AxiosPromise {
- return localVarFp.createTranslation(file, model, prompt, responseFormat, temperature, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Delete a file.
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- deleteFile(fileId: string, options?: any): AxiosPromise {
- return localVarFp.deleteFile(fileId, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
- * @param {string} model The model to delete
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- deleteModel(model: string, options?: any): AxiosPromise {
- return localVarFp.deleteModel(model, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Returns the contents of the specified file
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- downloadFile(fileId: string, options?: any): AxiosPromise {
- return localVarFp.downloadFile(fileId, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- listEngines(options?: any): AxiosPromise {
- return localVarFp.listEngines(options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Returns a list of files that belong to the user\'s organization.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- listFiles(options?: any): AxiosPromise {
- return localVarFp.listFiles(options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Get fine-grained status updates for a fine-tune job.
- * @param {string} fineTuneId The ID of the fine-tune job to get events for.
- * @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a `data: [DONE]` message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- listFineTuneEvents(fineTuneId: string, stream?: boolean, options?: any): AxiosPromise {
- return localVarFp.listFineTuneEvents(fineTuneId, stream, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary List your organization\'s fine-tuning jobs
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- listFineTunes(options?: any): AxiosPromise {
- return localVarFp.listFineTunes(options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- listModels(options?: any): AxiosPromise {
- return localVarFp.listModels(options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
- * @param {string} engineId The ID of the engine to use for this request
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- retrieveEngine(engineId: string, options?: any): AxiosPromise {
- return localVarFp.retrieveEngine(engineId, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Returns information about a specific file.
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- retrieveFile(fileId: string, options?: any): AxiosPromise {
- return localVarFp.retrieveFile(fileId, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
- * @param {string} fineTuneId The ID of the fine-tune job
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- retrieveFineTune(fineTuneId: string, options?: any): AxiosPromise {
- return localVarFp.retrieveFineTune(fineTuneId, options).then((request) => request(axios, basePath));
- },
- /**
- *
- * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
- * @param {string} model The ID of the model to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- retrieveModel(model: string, options?: any): AxiosPromise {
- return localVarFp.retrieveModel(model, options).then((request) => request(axios, basePath));
- },
- };
-};
-
-/**
- * OpenAIApi - object-oriented interface
- * @export
- * @class OpenAIApi
- * @extends {BaseAPI}
- */
-export class OpenAIApi extends BaseAPI {
- /**
- *
- * @summary Immediately cancel a fine-tune job.
- * @param {string} fineTuneId The ID of the fine-tune job to cancel
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public cancelFineTune(fineTuneId: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).cancelFineTune(fineTuneId, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
- * @param {CreateAnswerRequest} createAnswerRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createAnswer(createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createAnswer(createAnswerRequest, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Creates a model response for the given chat conversation.
- * @param {CreateChatCompletionRequest} createChatCompletionRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createChatCompletion(createChatCompletionRequest: CreateChatCompletionRequest, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createChatCompletion(createChatCompletionRequest, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Classifies the specified `query` using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded `file`, or explicitly listed in the request using the `examples` parameter for quick tests and small scale use cases.
- * @param {CreateClassificationRequest} createClassificationRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createClassification(createClassificationRequest: CreateClassificationRequest, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createClassification(createClassificationRequest, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Creates a completion for the provided prompt and parameters.
- * @param {CreateCompletionRequest} createCompletionRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createCompletion(createCompletionRequest: CreateCompletionRequest, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createCompletion(createCompletionRequest, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Creates a new edit for the provided input, instruction, and parameters.
- * @param {CreateEditRequest} createEditRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createEdit(createEditRequest: CreateEditRequest, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createEdit(createEditRequest, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Creates an embedding vector representing the input text.
- * @param {CreateEmbeddingRequest} createEmbeddingRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createEmbedding(createEmbeddingRequest: CreateEmbeddingRequest, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createEmbedding(createEmbeddingRequest, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
- * @param {File} file Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded. If the `purpose` is set to \\\"fine-tune\\\", each line is a JSON record with \\\"prompt\\\" and \\\"completion\\\" fields representing your [training examples](/docs/guides/fine-tuning/prepare-training-data).
- * @param {string} purpose The intended purpose of the uploaded documents. Use \\\"fine-tune\\\" for [Fine-tuning](/docs/api-reference/fine-tunes). This allows us to validate the format of the uploaded file.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createFile(file: File, purpose: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createFile(file, purpose, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
- * @param {CreateFineTuneRequest} createFineTuneRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createFineTune(createFineTuneRequest: CreateFineTuneRequest, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createFineTune(createFineTuneRequest, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Creates an image given a prompt.
- * @param {CreateImageRequest} createImageRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createImage(createImageRequest: CreateImageRequest, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createImage(createImageRequest, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Creates an edited or extended image given an original image and a prompt.
- * @param {File} image The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
- * @param {string} prompt A text description of the desired image(s). The maximum length is 1000 characters.
- * @param {File} [mask] An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
- * @param {number} [n] The number of images to generate. Must be between 1 and 10.
- * @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
- * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
- * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createImageEdit(image: File, prompt: string, mask?: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createImageEdit(image, prompt, mask, n, size, responseFormat, user, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Creates a variation of a given image.
- * @param {File} image The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
- * @param {number} [n] The number of images to generate. Must be between 1 and 10.
- * @param {string} [size] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
- * @param {string} [responseFormat] The format in which the generated images are returned. Must be one of `url` or `b64_json`.
- * @param {string} [user] A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createImageVariation(image: File, n?: number, size?: string, responseFormat?: string, user?: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createImageVariation(image, n, size, responseFormat, user, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Classifies if text violates OpenAI\'s Content Policy
- * @param {CreateModerationRequest} createModerationRequest
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createModeration(createModerationRequest: CreateModerationRequest, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createModeration(createModerationRequest, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them. To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time. When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query.
- * @param {string} engineId The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
- * @param {CreateSearchRequest} createSearchRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createSearch(engineId: string, createSearchRequest: CreateSearchRequest, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createSearch(engineId, createSearchRequest, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Transcribes audio into the input language.
- * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
- * @param {string} model ID of the model to use. Only `whisper-1` is currently available.
- * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
- * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
- * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
- * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createTranscription(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, language?: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createTranscription(file, model, prompt, responseFormat, temperature, language, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Translates audio into into English.
- * @param {File} file The audio file object (not file name) translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
- * @param {string} model ID of the model to use. Only `whisper-1` is currently available.
- * @param {string} [prompt] An optional text to guide the model\\\'s style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
- * @param {string} [responseFormat] The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
- * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public createTranslation(file: File, model: string, prompt?: string, responseFormat?: string, temperature?: number, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).createTranslation(file, model, prompt, responseFormat, temperature, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Delete a file.
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public deleteFile(fileId: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).deleteFile(fileId, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Delete a fine-tuned model. You must have the Owner role in your organization.
- * @param {string} model The model to delete
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public deleteModel(model: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).deleteModel(model, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Returns the contents of the specified file
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public downloadFile(fileId: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).downloadFile(fileId, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public listEngines(options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).listEngines(options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Returns a list of files that belong to the user\'s organization.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public listFiles(options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).listFiles(options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Get fine-grained status updates for a fine-tune job.
- * @param {string} fineTuneId The ID of the fine-tune job to get events for.
- * @param {boolean} [stream] Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. The stream will terminate with a `data: [DONE]` message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public listFineTuneEvents(fineTuneId: string, stream?: boolean, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).listFineTuneEvents(fineTuneId, stream, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary List your organization\'s fine-tuning jobs
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public listFineTunes(options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).listFineTunes(options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Lists the currently available models, and provides basic information about each one such as the owner and availability.
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public listModels(options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).listModels(options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Retrieves a model instance, providing basic information about it such as the owner and availability.
- * @param {string} engineId The ID of the engine to use for this request
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public retrieveEngine(engineId: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).retrieveEngine(engineId, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Returns information about a specific file.
- * @param {string} fileId The ID of the file to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public retrieveFile(fileId: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).retrieveFile(fileId, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Gets info about the fine-tune job. [Learn more about Fine-tuning](/docs/guides/fine-tuning)
- * @param {string} fineTuneId The ID of the fine-tune job
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public retrieveFineTune(fineTuneId: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).retrieveFineTune(fineTuneId, options).then((request) => request(this.axios, this.basePath));
- }
-
- /**
- *
- * @summary Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
- * @param {string} model The ID of the model to use for this request
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- * @memberof OpenAIApi
- */
- public retrieveModel(model: string, options?: AxiosRequestConfig) {
- return OpenAIApiFp(this.configuration).retrieveModel(model, options).then((request) => request(this.axios, this.basePath));
- }
-}
-
-
diff --git a/base.ts b/base.ts
deleted file mode 100644
index a2c5eb0d4..000000000
--- a/base.ts
+++ /dev/null
@@ -1,72 +0,0 @@
-/* tslint:disable */
-/* eslint-disable */
-/**
- * OpenAI API
- * APIs for sampling from and fine-tuning language models
- *
- * The version of the OpenAPI document: 1.3.0
- *
- *
- * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
- * https://openapi-generator.tech
- * Do not edit the class manually.
- */
-
-
-import type { Configuration } from './configuration';
-// Some imports not used depending on template conditions
-// @ts-ignore
-import type { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios';
-import globalAxios from 'axios';
-
-export const BASE_PATH = "https://api.openai.com/v1".replace(/\/+$/, "");
-
-/**
- *
- * @export
- */
-export const COLLECTION_FORMATS = {
- csv: ",",
- ssv: " ",
- tsv: "\t",
- pipes: "|",
-};
-
-/**
- *
- * @export
- * @interface RequestArgs
- */
-export interface RequestArgs {
- url: string;
- options: AxiosRequestConfig;
-}
-
-/**
- *
- * @export
- * @class BaseAPI
- */
-export class BaseAPI {
- protected configuration: Configuration | undefined;
-
- constructor(configuration?: Configuration, protected basePath: string = BASE_PATH, protected axios: AxiosInstance = globalAxios) {
- if (configuration) {
- this.configuration = configuration;
- this.basePath = configuration.basePath || this.basePath;
- }
- }
-};
-
-/**
- *
- * @export
- * @class RequiredError
- * @extends {Error}
- */
-export class RequiredError extends Error {
- constructor(public field: string, msg?: string) {
- super(msg);
- this.name = "RequiredError"
- }
-}
diff --git a/bin/check-test-server b/bin/check-test-server
new file mode 100755
index 000000000..34efa9dac
--- /dev/null
+++ b/bin/check-test-server
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;33m'
+NC='\033[0m' # No Color
+
+function prism_is_running() {
+ curl --silent "http://localhost:4010" >/dev/null 2>&1
+}
+
+function is_overriding_api_base_url() {
+ [ -n "$API_BASE_URL" ]
+}
+
+if is_overriding_api_base_url ; then
+ # If someone is running the tests against the live API, we can trust they know
+ # what they're doing and exit early.
+ echo -e "${GREEN}✔ Running tests against ${API_BASE_URL}${NC}"
+
+ exit 0
+elif prism_is_running ; then
+ echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}"
+ echo
+
+ exit 0
+else
+ echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server"
+ echo -e "running against your OpenAPI spec."
+ echo
+ echo -e "${YELLOW}To fix:${NC}"
+ echo
+ echo -e "1. Install Prism (requires Node 16+):"
+ echo
+ echo -e " With npm:"
+ echo -e " \$ ${YELLOW}npm install -g @stoplight/prism-cli${NC}"
+ echo
+ echo -e " With yarn:"
+ echo -e " \$ ${YELLOW}yarn global add @stoplight/prism-cli${NC}"
+ echo
+ echo -e "2. Run the mock server"
+ echo
+ echo -e " To run the server, pass in the path of your OpenAPI"
+ echo -e " spec to the prism command:"
+ echo
+ echo -e " \$ ${YELLOW}prism mock path/to/your.openapi.yml${NC}"
+ echo
+
+ exit 1
+fi
diff --git a/bin/cli b/bin/cli
new file mode 100755
index 000000000..c2d110f19
--- /dev/null
+++ b/bin/cli
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+set -eou pipefail
+
+if [ $# -eq 0 ]; then
+ echo "Usage: $0 "
+ echo
+ echo "Subcommands:"
+ echo " migrate Run migrations to update from openai v3 to v4"
+ echo
+ exit 1
+fi
+
+if [ "$1" = "migrate" ]; then
+ echo "This automatic code migration is provided by grit.io"
+ echo "Visit https://app.grit.io/studio?preset=openai_v4 for more details."
+ shift
+ npx -y @getgrit/launcher apply openai_v4 "$@"
+else
+ echo "Unknown subcommand $1; Expected 'migrate'" >&2
+ exit 1
+fi
diff --git a/build b/build
new file mode 100755
index 000000000..85312ce8e
--- /dev/null
+++ b/build
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+set -exuo pipefail
+
+node scripts/check-version.cjs
+
+# Build into dist and will publish the package from there,
+# so that src/resources/foo.ts becomes /resources/foo.js
+# This way importing from `"openai/resources/foo"` works
+# even with `"moduleResolution": "node"`
+
+rm -rf dist; mkdir dist
+# Copy src to dist/src and build from dist/src into dist, so that
+# the source map for index.js.map will refer to ./src/index.ts etc
+cp -rp src README.md dist
+for file in LICENSE CHANGELOG.md; do
+ if [ -e "${file}" ]; then cp "${file}" dist; fi
+done
+if [ -e "bin/cli" ]; then
+ mkdir dist/bin
+ cp -p "bin/cli" dist/bin/;
+fi
+# this converts the export map paths for the dist directory
+# and does a few other minor things
+node scripts/make-dist-package-json.cjs > dist/package.json
+
+# build to .js/.mjs/.d.ts files
+npm exec tsc-multi
+# copy over handwritten .js/.mjs/.d.ts files
+cp src/_shims/*.{d.ts,js,mjs} dist/_shims
+npm exec tsc-alias -- -p tsconfig.build.json
+# we need to add exports = module.exports = OpenAI Node to index.js;
+# No way to get that from index.ts because it would cause compile errors
+# when building .mjs
+node scripts/fix-index-exports.cjs
+# with "moduleResolution": "nodenext", if ESM resolves to index.d.ts,
+# it'll have TS errors on the default import. But if it resolves to
+# index.d.mts the default import will work (even though both files have
+# the same export default statement)
+cp dist/index.d.ts dist/index.d.mts
+
+SED=(sed -i)
+if [[ "$OSTYPE" == "darwin"* ]]; then SED=(sed -i ''); fi
+
+# strip out lib="dom" and types="node" references; these are needed at build time,
+# but would pollute the user's TS environment
+REFERENCE_SUBS='s/^ *\/\/\/ * /dev/null && [ -e ./build-deno ]
+then
+ ./build-deno
+fi
diff --git a/common.ts b/common.ts
deleted file mode 100644
index 37662811c..000000000
--- a/common.ts
+++ /dev/null
@@ -1,150 +0,0 @@
-/* tslint:disable */
-/* eslint-disable */
-/**
- * OpenAI API
- * APIs for sampling from and fine-tuning language models
- *
- * The version of the OpenAPI document: 1.3.0
- *
- *
- * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
- * https://openapi-generator.tech
- * Do not edit the class manually.
- */
-
-
-import type { Configuration } from "./configuration";
-import type { RequestArgs } from "./base";
-import type { AxiosInstance, AxiosResponse } from 'axios';
-import { RequiredError } from "./base";
-
-/**
- *
- * @export
- */
-export const DUMMY_BASE_URL = 'https://example.com'
-
-/**
- *
- * @throws {RequiredError}
- * @export
- */
-export const assertParamExists = function (functionName: string, paramName: string, paramValue: unknown) {
- if (paramValue === null || paramValue === undefined) {
- throw new RequiredError(paramName, `Required parameter ${paramName} was null or undefined when calling ${functionName}.`);
- }
-}
-
-/**
- *
- * @export
- */
-export const setApiKeyToObject = async function (object: any, keyParamName: string, configuration?: Configuration) {
- if (configuration && configuration.apiKey) {
- const localVarApiKeyValue = typeof configuration.apiKey === 'function'
- ? await configuration.apiKey(keyParamName)
- : await configuration.apiKey;
- object[keyParamName] = localVarApiKeyValue;
- }
-}
-
-/**
- *
- * @export
- */
-export const setBasicAuthToObject = function (object: any, configuration?: Configuration) {
- if (configuration && (configuration.username || configuration.password)) {
- object["auth"] = { username: configuration.username, password: configuration.password };
- }
-}
-
-/**
- *
- * @export
- */
-export const setBearerAuthToObject = async function (object: any, configuration?: Configuration) {
- if (configuration && configuration.accessToken) {
- const accessToken = typeof configuration.accessToken === 'function'
- ? await configuration.accessToken()
- : await configuration.accessToken;
- object["Authorization"] = "Bearer " + accessToken;
- }
-}
-
-/**
- *
- * @export
- */
-export const setOAuthToObject = async function (object: any, name: string, scopes: string[], configuration?: Configuration) {
- if (configuration && configuration.accessToken) {
- const localVarAccessTokenValue = typeof configuration.accessToken === 'function'
- ? await configuration.accessToken(name, scopes)
- : await configuration.accessToken;
- object["Authorization"] = "Bearer " + localVarAccessTokenValue;
- }
-}
-
-function setFlattenedQueryParams(urlSearchParams: URLSearchParams, parameter: any, key: string = ""): void {
- if (parameter == null) return;
- if (typeof parameter === "object") {
- if (Array.isArray(parameter)) {
- (parameter as any[]).forEach(item => setFlattenedQueryParams(urlSearchParams, item, key));
- }
- else {
- Object.keys(parameter).forEach(currentKey =>
- setFlattenedQueryParams(urlSearchParams, parameter[currentKey], `${key}${key !== '' ? '.' : ''}${currentKey}`)
- );
- }
- }
- else {
- if (urlSearchParams.has(key)) {
- urlSearchParams.append(key, parameter);
- }
- else {
- urlSearchParams.set(key, parameter);
- }
- }
-}
-
-/**
- *
- * @export
- */
-export const setSearchParams = function (url: URL, ...objects: any[]) {
- const searchParams = new URLSearchParams(url.search);
- setFlattenedQueryParams(searchParams, objects);
- url.search = searchParams.toString();
-}
-
-/**
- *
- * @export
- */
-export const serializeDataIfNeeded = function (value: any, requestOptions: any, configuration?: Configuration) {
- const nonString = typeof value !== 'string';
- const needsSerialization = nonString && configuration && configuration.isJsonMime
- ? configuration.isJsonMime(requestOptions.headers['Content-Type'])
- : nonString;
- return needsSerialization
- ? JSON.stringify(value !== undefined ? value : {})
- : (value || "");
-}
-
-/**
- *
- * @export
- */
-export const toPathString = function (url: URL) {
- return url.pathname + url.search + url.hash
-}
-
-/**
- *
- * @export
- */
-export const createRequestFunction = function (axiosArgs: RequestArgs, globalAxios: AxiosInstance, BASE_PATH: string, configuration?: Configuration) {
- return >(axios: AxiosInstance = globalAxios, basePath: string = BASE_PATH) => {
- const axiosRequestArgs = {...axiosArgs.options, url: (configuration?.basePath || basePath) + axiosArgs.url};
- return axios.request(axiosRequestArgs);
- };
-}
diff --git a/configuration.ts b/configuration.ts
deleted file mode 100644
index 32127174c..000000000
--- a/configuration.ts
+++ /dev/null
@@ -1,127 +0,0 @@
-/* tslint:disable */
-/* eslint-disable */
-/**
- * OpenAI API
- * APIs for sampling from and fine-tuning language models
- *
- * The version of the OpenAPI document: 1.3.0
- *
- *
- * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
- * https://openapi-generator.tech
- * Do not edit the class manually.
- */
-
-
-const packageJson = require("../package.json");
-
-export interface ConfigurationParameters {
- apiKey?: string | Promise | ((name: string) => string) | ((name: string) => Promise);
- organization?: string;
- username?: string;
- password?: string;
- accessToken?: string | Promise | ((name?: string, scopes?: string[]) => string) | ((name?: string, scopes?: string[]) => Promise);
- basePath?: string;
- baseOptions?: any;
- formDataCtor?: new () => any;
-}
-
-export class Configuration {
- /**
- * parameter for apiKey security
- * @param name security name
- * @memberof Configuration
- */
- apiKey?: string | Promise | ((name: string) => string) | ((name: string) => Promise);
- /**
- * OpenAI organization id
- *
- * @type {string}
- * @memberof Configuration
- */
- organization?: string;
- /**
- * parameter for basic security
- *
- * @type {string}
- * @memberof Configuration
- */
- username?: string;
- /**
- * parameter for basic security
- *
- * @type {string}
- * @memberof Configuration
- */
- password?: string;
- /**
- * parameter for oauth2 security
- * @param name security name
- * @param scopes oauth2 scope
- * @memberof Configuration
- */
- accessToken?: string | Promise | ((name?: string, scopes?: string[]) => string) | ((name?: string, scopes?: string[]) => Promise);
- /**
- * override base path
- *
- * @type {string}
- * @memberof Configuration
- */
- basePath?: string;
- /**
- * base options for axios calls
- *
- * @type {any}
- * @memberof Configuration
- */
- baseOptions?: any;
- /**
- * The FormData constructor that will be used to create multipart form data
- * requests. You can inject this here so that execution environments that
- * do not support the FormData class can still run the generated client.
- *
- * @type {new () => FormData}
- */
- formDataCtor?: new () => any;
-
- constructor(param: ConfigurationParameters = {}) {
- this.apiKey = param.apiKey;
- this.organization = param.organization;
- this.username = param.username;
- this.password = param.password;
- this.accessToken = param.accessToken;
- this.basePath = param.basePath;
- this.baseOptions = param.baseOptions;
- this.formDataCtor = param.formDataCtor;
-
- if (!this.baseOptions) {
- this.baseOptions = {};
- }
- this.baseOptions.headers = {
- 'User-Agent': `OpenAI/NodeJS/${packageJson.version}`,
- 'Authorization': `Bearer ${this.apiKey}`,
- ...this.baseOptions.headers,
- }
- if (this.organization) {
- this.baseOptions.headers['OpenAI-Organization'] = this.organization;
- }
- if (!this.formDataCtor) {
- this.formDataCtor = require("form-data");
- }
- }
-
- /**
- * Check if the given MIME is a JSON MIME.
- * JSON MIME examples:
- * application/json
- * application/json; charset=UTF8
- * APPLICATION/JSON
- * application/vnd.company+json
- * @param mime - MIME (Multipurpose Internet Mail Extensions)
- * @return True if the given MIME is JSON, false otherwise.
- */
- public isJsonMime(mime: string): boolean {
- const jsonMime: RegExp = new RegExp('^(application\/json|[^;/ \t]+\/[^;/ \t]+[+]json)[ \t]*(;.*)?$', 'i');
- return mime !== null && (jsonMime.test(mime) || mime.toLowerCase() === 'application/json-patch+json');
- }
-}
\ No newline at end of file
diff --git a/dist/api.d.ts b/dist/api.d.ts
deleted file mode 100644
index d862f2bcb..000000000
--- a/dist/api.d.ts
+++ /dev/null
@@ -1,3003 +0,0 @@
-/**
- * OpenAI API
- * APIs for sampling from and fine-tuning language models
- *
- * The version of the OpenAPI document: 1.3.0
- *
- *
- * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
- * https://openapi-generator.tech
- * Do not edit the class manually.
- */
-import type { Configuration } from './configuration';
-import type { AxiosPromise, AxiosInstance, AxiosRequestConfig } from 'axios';
-import type { RequestArgs } from './base';
-import { BaseAPI } from './base';
-/**
- *
- * @export
- * @interface ChatCompletionFunctions
- */
-export interface ChatCompletionFunctions {
- /**
- * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
- * @type {string}
- * @memberof ChatCompletionFunctions
- */
- 'name': string;
- /**
- * The description of what the function does.
- * @type {string}
- * @memberof ChatCompletionFunctions
- */
- 'description'?: string;
- /**
- * The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
- * @type {{ [key: string]: any; }}
- * @memberof ChatCompletionFunctions
- */
- 'parameters'?: {
- [key: string]: any;
- };
-}
-/**
- *
- * @export
- * @interface ChatCompletionRequestMessage
- */
-export interface ChatCompletionRequestMessage {
- /**
- * The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
- * @type {string}
- * @memberof ChatCompletionRequestMessage
- */
- 'role': ChatCompletionRequestMessageRoleEnum;
- /**
- * The contents of the message. `content` is required for all messages except assistant messages with function calls.
- * @type {string}
- * @memberof ChatCompletionRequestMessage
- */
- 'content'?: string;
- /**
- * The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
- * @type {string}
- * @memberof ChatCompletionRequestMessage
- */
- 'name'?: string;
- /**
- *
- * @type {ChatCompletionRequestMessageFunctionCall}
- * @memberof ChatCompletionRequestMessage
- */
- 'function_call'?: ChatCompletionRequestMessageFunctionCall;
-}
-export declare const ChatCompletionRequestMessageRoleEnum: {
- readonly System: "system";
- readonly User: "user";
- readonly Assistant: "assistant";
- readonly Function: "function";
-};
-export declare type ChatCompletionRequestMessageRoleEnum = typeof ChatCompletionRequestMessageRoleEnum[keyof typeof ChatCompletionRequestMessageRoleEnum];
-/**
- * The name and arguments of a function that should be called, as generated by the model.
- * @export
- * @interface ChatCompletionRequestMessageFunctionCall
- */
-export interface ChatCompletionRequestMessageFunctionCall {
- /**
- * The name of the function to call.
- * @type {string}
- * @memberof ChatCompletionRequestMessageFunctionCall
- */
- 'name'?: string;
- /**
- * The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
- * @type {string}
- * @memberof ChatCompletionRequestMessageFunctionCall
- */
- 'arguments'?: string;
-}
-/**
- *
- * @export
- * @interface ChatCompletionResponseMessage
- */
-export interface ChatCompletionResponseMessage {
- /**
- * The role of the author of this message.
- * @type {string}
- * @memberof ChatCompletionResponseMessage
- */
- 'role': ChatCompletionResponseMessageRoleEnum;
- /**
- * The contents of the message.
- * @type {string}
- * @memberof ChatCompletionResponseMessage
- */
- 'content'?: string;
- /**
- *
- * @type {ChatCompletionRequestMessageFunctionCall}
- * @memberof ChatCompletionResponseMessage
- */
- 'function_call'?: ChatCompletionRequestMessageFunctionCall;
-}
-export declare const ChatCompletionResponseMessageRoleEnum: {
- readonly System: "system";
- readonly User: "user";
- readonly Assistant: "assistant";
- readonly Function: "function";
-};
-export declare type ChatCompletionResponseMessageRoleEnum = typeof ChatCompletionResponseMessageRoleEnum[keyof typeof ChatCompletionResponseMessageRoleEnum];
-/**
- *
- * @export
- * @interface CreateAnswerRequest
- */
-export interface CreateAnswerRequest {
- /**
- * ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`.
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'model': string;
- /**
- * Question to get answered.
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'question': string;
- /**
- * List of (question, answer) pairs that will help steer the model towards the tone and answer format you\'d like. We recommend adding 2 to 3 examples.
- * @type {Array}
- * @memberof CreateAnswerRequest
- */
- 'examples': Array;
- /**
- * A text snippet containing the contextual information used to generate the answers for the `examples` you provide.
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'examples_context': string;
- /**
- * List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. You should specify either `documents` or a `file`, but not both.
- * @type {Array}
- * @memberof CreateAnswerRequest
- */
- 'documents'?: Array | null;
- /**
- * The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `documents` or a `file`, but not both.
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'file'?: string | null;
- /**
- * ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'search_model'?: string | null;
- /**
- * The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost.
- * @type {number}
- * @memberof CreateAnswerRequest
- */
- 'max_rerank'?: number | null;
- /**
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
- * @type {number}
- * @memberof CreateAnswerRequest
- */
- 'temperature'?: number | null;
- /**
- * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
- * @type {number}
- * @memberof CreateAnswerRequest
- */
- 'logprobs'?: number | null;
- /**
- * The maximum number of tokens allowed for the generated answer
- * @type {number}
- * @memberof CreateAnswerRequest
- */
- 'max_tokens'?: number | null;
- /**
- *
- * @type {CreateAnswerRequestStop}
- * @memberof CreateAnswerRequest
- */
- 'stop'?: CreateAnswerRequestStop | null;
- /**
- * How many answers to generate for each question.
- * @type {number}
- * @memberof CreateAnswerRequest
- */
- 'n'?: number | null;
- /**
- * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
- * @type {object}
- * @memberof CreateAnswerRequest
- */
- 'logit_bias'?: object | null;
- /**
- * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
- * @type {boolean}
- * @memberof CreateAnswerRequest
- */
- 'return_metadata'?: boolean | null;
- /**
- * If set to `true`, the returned JSON will include a \"prompt\" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes.
- * @type {boolean}
- * @memberof CreateAnswerRequest
- */
- 'return_prompt'?: boolean | null;
- /**
- * If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion.
- * @type {Array}
- * @memberof CreateAnswerRequest
- */
- 'expand'?: Array | null;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateAnswerRequest
- */
- 'user'?: string;
-}
-/**
- * @type CreateAnswerRequestStop
- * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
- * @export
- */
-export declare type CreateAnswerRequestStop = Array | string;
-/**
- *
- * @export
- * @interface CreateAnswerResponse
- */
-export interface CreateAnswerResponse {
- /**
- *
- * @type {string}
- * @memberof CreateAnswerResponse
- */
- 'object'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateAnswerResponse
- */
- 'model'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateAnswerResponse
- */
- 'search_model'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateAnswerResponse
- */
- 'completion'?: string;
- /**
- *
- * @type {Array}
- * @memberof CreateAnswerResponse
- */
- 'answers'?: Array;
- /**
- *
- * @type {Array}
- * @memberof CreateAnswerResponse
- */
- 'selected_documents'?: Array;
-}
-/**
- *
- * @export
- * @interface CreateAnswerResponseSelectedDocumentsInner
- */
-export interface CreateAnswerResponseSelectedDocumentsInner {
- /**
- *
- * @type {number}
- * @memberof CreateAnswerResponseSelectedDocumentsInner
- */
- 'document'?: number;
- /**
- *
- * @type {string}
- * @memberof CreateAnswerResponseSelectedDocumentsInner
- */
- 'text'?: string;
-}
-/**
- *
- * @export
- * @interface CreateChatCompletionRequest
- */
-export interface CreateChatCompletionRequest {
- /**
- * ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
- * @type {string}
- * @memberof CreateChatCompletionRequest
- */
- 'model': string;
- /**
- * A list of messages comprising the conversation so far. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb).
- * @type {Array}
- * @memberof CreateChatCompletionRequest
- */
- 'messages': Array;
- /**
- * A list of functions the model may generate JSON inputs for.
- * @type {Array}
- * @memberof CreateChatCompletionRequest
- */
- 'functions'?: Array;
- /**
- *
- * @type {CreateChatCompletionRequestFunctionCall}
- * @memberof CreateChatCompletionRequest
- */
- 'function_call'?: CreateChatCompletionRequestFunctionCall;
- /**
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'temperature'?: number | null;
- /**
- * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'top_p'?: number | null;
- /**
- * How many chat completion choices to generate for each input message.
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'n'?: number | null;
- /**
- * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
- * @type {boolean}
- * @memberof CreateChatCompletionRequest
- */
- 'stream'?: boolean | null;
- /**
- *
- * @type {CreateChatCompletionRequestStop}
- * @memberof CreateChatCompletionRequest
- */
- 'stop'?: CreateChatCompletionRequestStop;
- /**
- * The maximum number of [tokens](/tokenizer) to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model\'s context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'max_tokens'?: number;
- /**
- * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'presence_penalty'?: number | null;
- /**
- * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
- * @type {number}
- * @memberof CreateChatCompletionRequest
- */
- 'frequency_penalty'?: number | null;
- /**
- * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
- * @type {object}
- * @memberof CreateChatCompletionRequest
- */
- 'logit_bias'?: object | null;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateChatCompletionRequest
- */
- 'user'?: string;
-}
-/**
- * @type CreateChatCompletionRequestFunctionCall
- * Controls how the model responds to function calls. \"none\" means the model does not call a function, and responds to the end-user. \"auto\" means the model can pick between an end-user or calling a function. Specifying a particular function via `{\"name\":\\ \"my_function\"}` forces the model to call that function. \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.
- * @export
- */
-export declare type CreateChatCompletionRequestFunctionCall = CreateChatCompletionRequestFunctionCallOneOf | string;
-/**
- *
- * @export
- * @interface CreateChatCompletionRequestFunctionCallOneOf
- */
-export interface CreateChatCompletionRequestFunctionCallOneOf {
- /**
- * The name of the function to call.
- * @type {string}
- * @memberof CreateChatCompletionRequestFunctionCallOneOf
- */
- 'name': string;
-}
-/**
- * @type CreateChatCompletionRequestStop
- * Up to 4 sequences where the API will stop generating further tokens.
- * @export
- */
-export declare type CreateChatCompletionRequestStop = Array | string;
-/**
- *
- * @export
- * @interface CreateChatCompletionResponse
- */
-export interface CreateChatCompletionResponse {
- /**
- *
- * @type {string}
- * @memberof CreateChatCompletionResponse
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof CreateChatCompletionResponse
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof CreateChatCompletionResponse
- */
- 'created': number;
- /**
- *
- * @type {string}
- * @memberof CreateChatCompletionResponse
- */
- 'model': string;
- /**
- *
- * @type {Array}
- * @memberof CreateChatCompletionResponse
- */
- 'choices': Array;
- /**
- *
- * @type {CreateCompletionResponseUsage}
- * @memberof CreateChatCompletionResponse
- */
- 'usage'?: CreateCompletionResponseUsage;
-}
-/**
- *
- * @export
- * @interface CreateChatCompletionResponseChoicesInner
- */
-export interface CreateChatCompletionResponseChoicesInner {
- /**
- *
- * @type {number}
- * @memberof CreateChatCompletionResponseChoicesInner
- */
- 'index'?: number;
- /**
- *
- * @type {ChatCompletionResponseMessage}
- * @memberof CreateChatCompletionResponseChoicesInner
- */
- 'message'?: ChatCompletionResponseMessage;
- /**
- *
- * @type {string}
- * @memberof CreateChatCompletionResponseChoicesInner
- */
- 'finish_reason'?: string;
-}
-/**
- *
- * @export
- * @interface CreateClassificationRequest
- */
-export interface CreateClassificationRequest {
- /**
- * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
- * @type {string}
- * @memberof CreateClassificationRequest
- */
- 'model': string;
- /**
- * Query to be classified.
- * @type {string}
- * @memberof CreateClassificationRequest
- */
- 'query': string;
- /**
- * A list of examples with labels, in the following format: `[[\"The movie is so interesting.\", \"Positive\"], [\"It is quite boring.\", \"Negative\"], ...]` All the label strings will be normalized to be capitalized. You should specify either `examples` or `file`, but not both.
- * @type {Array}
- * @memberof CreateClassificationRequest
- */
- 'examples'?: Array | null;
- /**
- * The ID of the uploaded file that contains training examples. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `examples` or `file`, but not both.
- * @type {string}
- * @memberof CreateClassificationRequest
- */
- 'file'?: string | null;
- /**
- * The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized.
- * @type {Array}
- * @memberof CreateClassificationRequest
- */
- 'labels'?: Array | null;
- /**
- * ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
- * @type {string}
- * @memberof CreateClassificationRequest
- */
- 'search_model'?: string | null;
- /**
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
- * @type {number}
- * @memberof CreateClassificationRequest
- */
- 'temperature'?: number | null;
- /**
- * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
- * @type {number}
- * @memberof CreateClassificationRequest
- */
- 'logprobs'?: number | null;
- /**
- * The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost.
- * @type {number}
- * @memberof CreateClassificationRequest
- */
- 'max_examples'?: number | null;
- /**
- * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
- * @type {object}
- * @memberof CreateClassificationRequest
- */
- 'logit_bias'?: object | null;
- /**
- * If set to `true`, the returned JSON will include a \"prompt\" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes.
- * @type {boolean}
- * @memberof CreateClassificationRequest
- */
- 'return_prompt'?: boolean | null;
- /**
- * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
- * @type {boolean}
- * @memberof CreateClassificationRequest
- */
- 'return_metadata'?: boolean | null;
- /**
- * If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion.
- * @type {Array}
- * @memberof CreateClassificationRequest
- */
- 'expand'?: Array | null;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateClassificationRequest
- */
- 'user'?: string;
-}
-/**
- *
- * @export
- * @interface CreateClassificationResponse
- */
-export interface CreateClassificationResponse {
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponse
- */
- 'object'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponse
- */
- 'model'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponse
- */
- 'search_model'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponse
- */
- 'completion'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponse
- */
- 'label'?: string;
- /**
- *
- * @type {Array}
- * @memberof CreateClassificationResponse
- */
- 'selected_examples'?: Array;
-}
-/**
- *
- * @export
- * @interface CreateClassificationResponseSelectedExamplesInner
- */
-export interface CreateClassificationResponseSelectedExamplesInner {
- /**
- *
- * @type {number}
- * @memberof CreateClassificationResponseSelectedExamplesInner
- */
- 'document'?: number;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponseSelectedExamplesInner
- */
- 'text'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateClassificationResponseSelectedExamplesInner
- */
- 'label'?: string;
-}
-/**
- *
- * @export
- * @interface CreateCompletionRequest
- */
-export interface CreateCompletionRequest {
- /**
- * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
- * @type {string}
- * @memberof CreateCompletionRequest
- */
- 'model': string;
- /**
- *
- * @type {CreateCompletionRequestPrompt}
- * @memberof CreateCompletionRequest
- */
- 'prompt'?: CreateCompletionRequestPrompt | null;
- /**
- * The suffix that comes after a completion of inserted text.
- * @type {string}
- * @memberof CreateCompletionRequest
- */
- 'suffix'?: string | null;
- /**
- * The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model\'s context length. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'max_tokens'?: number | null;
- /**
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'temperature'?: number | null;
- /**
- * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'top_p'?: number | null;
- /**
- * How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'n'?: number | null;
- /**
- * Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb).
- * @type {boolean}
- * @memberof CreateCompletionRequest
- */
- 'stream'?: boolean | null;
- /**
- * Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'logprobs'?: number | null;
- /**
- * Echo back the prompt in addition to the completion
- * @type {boolean}
- * @memberof CreateCompletionRequest
- */
- 'echo'?: boolean | null;
- /**
- *
- * @type {CreateCompletionRequestStop}
- * @memberof CreateCompletionRequest
- */
- 'stop'?: CreateCompletionRequestStop | null;
- /**
- * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'presence_penalty'?: number | null;
- /**
- * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'frequency_penalty'?: number | null;
- /**
- * Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
- * @type {number}
- * @memberof CreateCompletionRequest
- */
- 'best_of'?: number | null;
- /**
- * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.
- * @type {object}
- * @memberof CreateCompletionRequest
- */
- 'logit_bias'?: object | null;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateCompletionRequest
- */
- 'user'?: string;
-}
-/**
- * @type CreateCompletionRequestPrompt
- * The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
- * @export
- */
-export declare type CreateCompletionRequestPrompt = Array | Array | Array | string;
-/**
- * @type CreateCompletionRequestStop
- * Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
- * @export
- */
-export declare type CreateCompletionRequestStop = Array | string;
-/**
- *
- * @export
- * @interface CreateCompletionResponse
- */
-export interface CreateCompletionResponse {
- /**
- *
- * @type {string}
- * @memberof CreateCompletionResponse
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof CreateCompletionResponse
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof CreateCompletionResponse
- */
- 'created': number;
- /**
- *
- * @type {string}
- * @memberof CreateCompletionResponse
- */
- 'model': string;
- /**
- *
- * @type {Array}
- * @memberof CreateCompletionResponse
- */
- 'choices': Array;
- /**
- *
- * @type {CreateCompletionResponseUsage}
- * @memberof CreateCompletionResponse
- */
- 'usage'?: CreateCompletionResponseUsage;
-}
-/**
- *
- * @export
- * @interface CreateCompletionResponseChoicesInner
- */
-export interface CreateCompletionResponseChoicesInner {
- /**
- *
- * @type {string}
- * @memberof CreateCompletionResponseChoicesInner
- */
- 'text'?: string;
- /**
- *
- * @type {number}
- * @memberof CreateCompletionResponseChoicesInner
- */
- 'index'?: number;
- /**
- *
- * @type {CreateCompletionResponseChoicesInnerLogprobs}
- * @memberof CreateCompletionResponseChoicesInner
- */
- 'logprobs'?: CreateCompletionResponseChoicesInnerLogprobs | null;
- /**
- *
- * @type {string}
- * @memberof CreateCompletionResponseChoicesInner
- */
- 'finish_reason'?: string;
-}
-/**
- *
- * @export
- * @interface CreateCompletionResponseChoicesInnerLogprobs
- */
-export interface CreateCompletionResponseChoicesInnerLogprobs {
- /**
- *
- * @type {Array}
- * @memberof CreateCompletionResponseChoicesInnerLogprobs
- */
- 'tokens'?: Array;
- /**
- *
- * @type {Array}
- * @memberof CreateCompletionResponseChoicesInnerLogprobs
- */
- 'token_logprobs'?: Array;
- /**
- *
- * @type {Array}
- * @memberof CreateCompletionResponseChoicesInnerLogprobs
- */
- 'top_logprobs'?: Array;
- /**
- *
- * @type {Array}
- * @memberof CreateCompletionResponseChoicesInnerLogprobs
- */
- 'text_offset'?: Array;
-}
-/**
- *
- * @export
- * @interface CreateCompletionResponseUsage
- */
-export interface CreateCompletionResponseUsage {
- /**
- *
- * @type {number}
- * @memberof CreateCompletionResponseUsage
- */
- 'prompt_tokens': number;
- /**
- *
- * @type {number}
- * @memberof CreateCompletionResponseUsage
- */
- 'completion_tokens': number;
- /**
- *
- * @type {number}
- * @memberof CreateCompletionResponseUsage
- */
- 'total_tokens': number;
-}
-/**
- *
- * @export
- * @interface CreateEditRequest
- */
-export interface CreateEditRequest {
- /**
- * ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint.
- * @type {string}
- * @memberof CreateEditRequest
- */
- 'model': string;
- /**
- * The input text to use as a starting point for the edit.
- * @type {string}
- * @memberof CreateEditRequest
- */
- 'input'?: string | null;
- /**
- * The instruction that tells the model how to edit the prompt.
- * @type {string}
- * @memberof CreateEditRequest
- */
- 'instruction': string;
- /**
- * How many edits to generate for the input and instruction.
- * @type {number}
- * @memberof CreateEditRequest
- */
- 'n'?: number | null;
- /**
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
- * @type {number}
- * @memberof CreateEditRequest
- */
- 'temperature'?: number | null;
- /**
- * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
- * @type {number}
- * @memberof CreateEditRequest
- */
- 'top_p'?: number | null;
-}
-/**
- *
- * @export
- * @interface CreateEditResponse
- */
-export interface CreateEditResponse {
- /**
- *
- * @type {string}
- * @memberof CreateEditResponse
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof CreateEditResponse
- */
- 'created': number;
- /**
- *
- * @type {Array}
- * @memberof CreateEditResponse
- */
- 'choices': Array;
- /**
- *
- * @type {CreateCompletionResponseUsage}
- * @memberof CreateEditResponse
- */
- 'usage': CreateCompletionResponseUsage;
-}
-/**
- *
- * @export
- * @interface CreateEmbeddingRequest
- */
-export interface CreateEmbeddingRequest {
- /**
- * ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
- * @type {string}
- * @memberof CreateEmbeddingRequest
- */
- 'model': string;
- /**
- *
- * @type {CreateEmbeddingRequestInput}
- * @memberof CreateEmbeddingRequest
- */
- 'input': CreateEmbeddingRequestInput;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateEmbeddingRequest
- */
- 'user'?: string;
-}
-/**
- * @type CreateEmbeddingRequestInput
- * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed the max input tokens for the model (8191 tokens for `text-embedding-ada-002`). [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) for counting tokens.
- * @export
- */
-export declare type CreateEmbeddingRequestInput = Array | Array | Array | string;
-/**
- *
- * @export
- * @interface CreateEmbeddingResponse
- */
-export interface CreateEmbeddingResponse {
- /**
- *
- * @type {string}
- * @memberof CreateEmbeddingResponse
- */
- 'object': string;
- /**
- *
- * @type {string}
- * @memberof CreateEmbeddingResponse
- */
- 'model': string;
- /**
- *
- * @type {Array}
- * @memberof CreateEmbeddingResponse
- */
- 'data': Array;
- /**
- *
- * @type {CreateEmbeddingResponseUsage}
- * @memberof CreateEmbeddingResponse
- */
- 'usage': CreateEmbeddingResponseUsage;
-}
-/**
- *
- * @export
- * @interface CreateEmbeddingResponseDataInner
- */
-export interface CreateEmbeddingResponseDataInner {
- /**
- *
- * @type {number}
- * @memberof CreateEmbeddingResponseDataInner
- */
- 'index': number;
- /**
- *
- * @type {string}
- * @memberof CreateEmbeddingResponseDataInner
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof CreateEmbeddingResponseDataInner
- */
- 'embedding': Array;
-}
-/**
- *
- * @export
- * @interface CreateEmbeddingResponseUsage
- */
-export interface CreateEmbeddingResponseUsage {
- /**
- *
- * @type {number}
- * @memberof CreateEmbeddingResponseUsage
- */
- 'prompt_tokens': number;
- /**
- *
- * @type {number}
- * @memberof CreateEmbeddingResponseUsage
- */
- 'total_tokens': number;
-}
-/**
- *
- * @export
- * @interface CreateFineTuneRequest
- */
-export interface CreateFineTuneRequest {
- /**
- * The ID of an uploaded file that contains training data. See [upload file](/docs/api-reference/files/upload) for how to upload a file. Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
- * @type {string}
- * @memberof CreateFineTuneRequest
- */
- 'training_file': string;
- /**
- * The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the [fine-tuning results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). Your train and validation data should be mutually exclusive. Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys \"prompt\" and \"completion\". Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](/docs/guides/fine-tuning/creating-training-data) for more details.
- * @type {string}
- * @memberof CreateFineTuneRequest
- */
- 'validation_file'?: string | null;
- /**
- * The name of the base model to fine-tune. You can select one of \"ada\", \"babbage\", \"curie\", \"davinci\", or a fine-tuned model created after 2022-04-21. To learn more about these models, see the [Models](https://platform.openai.com/docs/models) documentation.
- * @type {string}
- * @memberof CreateFineTuneRequest
- */
- 'model'?: string | null;
- /**
- * The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
- * @type {number}
- * @memberof CreateFineTuneRequest
- */
- 'n_epochs'?: number | null;
- /**
- * The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass. By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set, capped at 256 - in general, we\'ve found that larger batch sizes tend to work better for larger datasets.
- * @type {number}
- * @memberof CreateFineTuneRequest
- */
- 'batch_size'?: number | null;
- /**
- * The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value. By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 depending on final `batch_size` (larger learning rates tend to perform better with larger batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results.
- * @type {number}
- * @memberof CreateFineTuneRequest
- */
- 'learning_rate_multiplier'?: number | null;
- /**
- * The weight to use for loss on the prompt tokens. This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), and can add a stabilizing effect to training when completions are short. If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt.
- * @type {number}
- * @memberof CreateFineTuneRequest
- */
- 'prompt_loss_weight'?: number | null;
- /**
- * If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch. These metrics can be viewed in the [results file](/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). In order to compute classification metrics, you must provide a `validation_file`. Additionally, you must specify `classification_n_classes` for multiclass classification or `classification_positive_class` for binary classification.
- * @type {boolean}
- * @memberof CreateFineTuneRequest
- */
- 'compute_classification_metrics'?: boolean | null;
- /**
- * The number of classes in a classification task. This parameter is required for multiclass classification.
- * @type {number}
- * @memberof CreateFineTuneRequest
- */
- 'classification_n_classes'?: number | null;
- /**
- * The positive class in binary classification. This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification.
- * @type {string}
- * @memberof CreateFineTuneRequest
- */
- 'classification_positive_class'?: string | null;
- /**
- * If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score is a generalization of F-1 score. This is only used for binary classification. With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall.
- * @type {Array}
- * @memberof CreateFineTuneRequest
- */
- 'classification_betas'?: Array | null;
- /**
- * A string of up to 40 characters that will be added to your fine-tuned model name. For example, a `suffix` of \"custom-model-name\" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`.
- * @type {string}
- * @memberof CreateFineTuneRequest
- */
- 'suffix'?: string | null;
-}
-/**
- *
- * @export
- * @interface CreateImageRequest
- */
-export interface CreateImageRequest {
- /**
- * A text description of the desired image(s). The maximum length is 1000 characters.
- * @type {string}
- * @memberof CreateImageRequest
- */
- 'prompt': string;
- /**
- * The number of images to generate. Must be between 1 and 10.
- * @type {number}
- * @memberof CreateImageRequest
- */
- 'n'?: number | null;
- /**
- * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
- * @type {string}
- * @memberof CreateImageRequest
- */
- 'size'?: CreateImageRequestSizeEnum;
- /**
- * The format in which the generated images are returned. Must be one of `url` or `b64_json`.
- * @type {string}
- * @memberof CreateImageRequest
- */
- 'response_format'?: CreateImageRequestResponseFormatEnum;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateImageRequest
- */
- 'user'?: string;
-}
-export declare const CreateImageRequestSizeEnum: {
- readonly _256x256: "256x256";
- readonly _512x512: "512x512";
- readonly _1024x1024: "1024x1024";
-};
-export declare type CreateImageRequestSizeEnum = typeof CreateImageRequestSizeEnum[keyof typeof CreateImageRequestSizeEnum];
-export declare const CreateImageRequestResponseFormatEnum: {
- readonly Url: "url";
- readonly B64Json: "b64_json";
-};
-export declare type CreateImageRequestResponseFormatEnum = typeof CreateImageRequestResponseFormatEnum[keyof typeof CreateImageRequestResponseFormatEnum];
-/**
- *
- * @export
- * @interface CreateModerationRequest
- */
-export interface CreateModerationRequest {
- /**
- *
- * @type {CreateModerationRequestInput}
- * @memberof CreateModerationRequest
- */
- 'input': CreateModerationRequestInput;
- /**
- * Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
- * @type {string}
- * @memberof CreateModerationRequest
- */
- 'model'?: string;
-}
-/**
- * @type CreateModerationRequestInput
- * The input text to classify
- * @export
- */
-export declare type CreateModerationRequestInput = Array | string;
-/**
- *
- * @export
- * @interface CreateModerationResponse
- */
-export interface CreateModerationResponse {
- /**
- *
- * @type {string}
- * @memberof CreateModerationResponse
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof CreateModerationResponse
- */
- 'model': string;
- /**
- *
- * @type {Array}
- * @memberof CreateModerationResponse
- */
- 'results': Array;
-}
-/**
- *
- * @export
- * @interface CreateModerationResponseResultsInner
- */
-export interface CreateModerationResponseResultsInner {
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInner
- */
- 'flagged': boolean;
- /**
- *
- * @type {CreateModerationResponseResultsInnerCategories}
- * @memberof CreateModerationResponseResultsInner
- */
- 'categories': CreateModerationResponseResultsInnerCategories;
- /**
- *
- * @type {CreateModerationResponseResultsInnerCategoryScores}
- * @memberof CreateModerationResponseResultsInner
- */
- 'category_scores': CreateModerationResponseResultsInnerCategoryScores;
-}
-/**
- *
- * @export
- * @interface CreateModerationResponseResultsInnerCategories
- */
-export interface CreateModerationResponseResultsInnerCategories {
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'hate': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'hate/threatening': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'self-harm': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'sexual': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'sexual/minors': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'violence': boolean;
- /**
- *
- * @type {boolean}
- * @memberof CreateModerationResponseResultsInnerCategories
- */
- 'violence/graphic': boolean;
-}
-/**
- *
- * @export
- * @interface CreateModerationResponseResultsInnerCategoryScores
- */
-export interface CreateModerationResponseResultsInnerCategoryScores {
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'hate': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'hate/threatening': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'self-harm': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'sexual': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'sexual/minors': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'violence': number;
- /**
- *
- * @type {number}
- * @memberof CreateModerationResponseResultsInnerCategoryScores
- */
- 'violence/graphic': number;
-}
-/**
- *
- * @export
- * @interface CreateSearchRequest
- */
-export interface CreateSearchRequest {
- /**
- * Query to search against the documents.
- * @type {string}
- * @memberof CreateSearchRequest
- */
- 'query': string;
- /**
- * Up to 200 documents to search over, provided as a list of strings. The maximum document length (in tokens) is 2034 minus the number of tokens in the query. You should specify either `documents` or a `file`, but not both.
- * @type {Array}
- * @memberof CreateSearchRequest
- */
- 'documents'?: Array | null;
- /**
- * The ID of an uploaded file that contains documents to search over. You should specify either `documents` or a `file`, but not both.
- * @type {string}
- * @memberof CreateSearchRequest
- */
- 'file'?: string | null;
- /**
- * The maximum number of documents to be re-ranked and returned by search. This flag only takes effect when `file` is set.
- * @type {number}
- * @memberof CreateSearchRequest
- */
- 'max_rerank'?: number | null;
- /**
- * A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a \"metadata\" field. This flag only takes effect when `file` is set.
- * @type {boolean}
- * @memberof CreateSearchRequest
- */
- 'return_metadata'?: boolean | null;
- /**
- * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
- * @type {string}
- * @memberof CreateSearchRequest
- */
- 'user'?: string;
-}
-/**
- *
- * @export
- * @interface CreateSearchResponse
- */
-export interface CreateSearchResponse {
- /**
- *
- * @type {string}
- * @memberof CreateSearchResponse
- */
- 'object'?: string;
- /**
- *
- * @type {string}
- * @memberof CreateSearchResponse
- */
- 'model'?: string;
- /**
- *
- * @type {Array}
- * @memberof CreateSearchResponse
- */
- 'data'?: Array;
-}
-/**
- *
- * @export
- * @interface CreateSearchResponseDataInner
- */
-export interface CreateSearchResponseDataInner {
- /**
- *
- * @type {string}
- * @memberof CreateSearchResponseDataInner
- */
- 'object'?: string;
- /**
- *
- * @type {number}
- * @memberof CreateSearchResponseDataInner
- */
- 'document'?: number;
- /**
- *
- * @type {number}
- * @memberof CreateSearchResponseDataInner
- */
- 'score'?: number;
-}
-/**
- *
- * @export
- * @interface CreateTranscriptionResponse
- */
-export interface CreateTranscriptionResponse {
- /**
- *
- * @type {string}
- * @memberof CreateTranscriptionResponse
- */
- 'text': string;
-}
-/**
- *
- * @export
- * @interface CreateTranslationResponse
- */
-export interface CreateTranslationResponse {
- /**
- *
- * @type {string}
- * @memberof CreateTranslationResponse
- */
- 'text': string;
-}
-/**
- *
- * @export
- * @interface DeleteFileResponse
- */
-export interface DeleteFileResponse {
- /**
- *
- * @type {string}
- * @memberof DeleteFileResponse
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof DeleteFileResponse
- */
- 'object': string;
- /**
- *
- * @type {boolean}
- * @memberof DeleteFileResponse
- */
- 'deleted': boolean;
-}
-/**
- *
- * @export
- * @interface DeleteModelResponse
- */
-export interface DeleteModelResponse {
- /**
- *
- * @type {string}
- * @memberof DeleteModelResponse
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof DeleteModelResponse
- */
- 'object': string;
- /**
- *
- * @type {boolean}
- * @memberof DeleteModelResponse
- */
- 'deleted': boolean;
-}
-/**
- *
- * @export
- * @interface Engine
- */
-export interface Engine {
- /**
- *
- * @type {string}
- * @memberof Engine
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof Engine
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof Engine
- */
- 'created': number | null;
- /**
- *
- * @type {boolean}
- * @memberof Engine
- */
- 'ready': boolean;
-}
-/**
- *
- * @export
- * @interface ErrorResponse
- */
-export interface ErrorResponse {
- /**
- *
- * @type {Error}
- * @memberof ErrorResponse
- */
- 'error': Error;
-}
-/**
- *
- * @export
- * @interface FineTune
- */
-export interface FineTune {
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof FineTune
- */
- 'created_at': number;
- /**
- *
- * @type {number}
- * @memberof FineTune
- */
- 'updated_at': number;
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'model': string;
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'fine_tuned_model': string | null;
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'organization_id': string;
- /**
- *
- * @type {string}
- * @memberof FineTune
- */
- 'status': string;
- /**
- *
- * @type {object}
- * @memberof FineTune
- */
- 'hyperparams': object;
- /**
- *
- * @type {Array}
- * @memberof FineTune
- */
- 'training_files': Array;
- /**
- *
- * @type {Array}
- * @memberof FineTune
- */
- 'validation_files': Array;
- /**
- *
- * @type {Array}
- * @memberof FineTune
- */
- 'result_files': Array;
- /**
- *
- * @type {Array}
- * @memberof FineTune
- */
- 'events'?: Array;
-}
-/**
- *
- * @export
- * @interface FineTuneEvent
- */
-export interface FineTuneEvent {
- /**
- *
- * @type {string}
- * @memberof FineTuneEvent
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof FineTuneEvent
- */
- 'created_at': number;
- /**
- *
- * @type {string}
- * @memberof FineTuneEvent
- */
- 'level': string;
- /**
- *
- * @type {string}
- * @memberof FineTuneEvent
- */
- 'message': string;
-}
-/**
- *
- * @export
- * @interface ImagesResponse
- */
-export interface ImagesResponse {
- /**
- *
- * @type {number}
- * @memberof ImagesResponse
- */
- 'created': number;
- /**
- *
- * @type {Array}
- * @memberof ImagesResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface ImagesResponseDataInner
- */
-export interface ImagesResponseDataInner {
- /**
- *
- * @type {string}
- * @memberof ImagesResponseDataInner
- */
- 'url'?: string;
- /**
- *
- * @type {string}
- * @memberof ImagesResponseDataInner
- */
- 'b64_json'?: string;
-}
-/**
- *
- * @export
- * @interface ListEnginesResponse
- */
-export interface ListEnginesResponse {
- /**
- *
- * @type {string}
- * @memberof ListEnginesResponse
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof ListEnginesResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface ListFilesResponse
- */
-export interface ListFilesResponse {
- /**
- *
- * @type {string}
- * @memberof ListFilesResponse
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof ListFilesResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface ListFineTuneEventsResponse
- */
-export interface ListFineTuneEventsResponse {
- /**
- *
- * @type {string}
- * @memberof ListFineTuneEventsResponse
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof ListFineTuneEventsResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface ListFineTunesResponse
- */
-export interface ListFineTunesResponse {
- /**
- *
- * @type {string}
- * @memberof ListFineTunesResponse
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof ListFineTunesResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface ListModelsResponse
- */
-export interface ListModelsResponse {
- /**
- *
- * @type {string}
- * @memberof ListModelsResponse
- */
- 'object': string;
- /**
- *
- * @type {Array}
- * @memberof ListModelsResponse
- */
- 'data': Array;
-}
-/**
- *
- * @export
- * @interface Model
- */
-export interface Model {
- /**
- *
- * @type {string}
- * @memberof Model
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof Model
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof Model
- */
- 'created': number;
- /**
- *
- * @type {string}
- * @memberof Model
- */
- 'owned_by': string;
-}
-/**
- *
- * @export
- * @interface ModelError
- */
-export interface ModelError {
- /**
- *
- * @type {string}
- * @memberof ModelError
- */
- 'type': string;
- /**
- *
- * @type {string}
- * @memberof ModelError
- */
- 'message': string;
- /**
- *
- * @type {string}
- * @memberof ModelError
- */
- 'param': string | null;
- /**
- *
- * @type {string}
- * @memberof ModelError
- */
- 'code': string | null;
-}
-/**
- *
- * @export
- * @interface OpenAIFile
- */
-export interface OpenAIFile {
- /**
- *
- * @type {string}
- * @memberof OpenAIFile
- */
- 'id': string;
- /**
- *
- * @type {string}
- * @memberof OpenAIFile
- */
- 'object': string;
- /**
- *
- * @type {number}
- * @memberof OpenAIFile
- */
- 'bytes': number;
- /**
- *
- * @type {number}
- * @memberof OpenAIFile
- */
- 'created_at': number;
- /**
- *
- * @type {string}
- * @memberof OpenAIFile
- */
- 'filename': string;
- /**
- *
- * @type {string}
- * @memberof OpenAIFile
- */
- 'purpose': string;
- /**
- *
- * @type {string}
- * @memberof OpenAIFile
- */
- 'status'?: string;
- /**
- *
- * @type {object}
- * @memberof OpenAIFile
- */
- 'status_details'?: object | null;
-}
-/**
- * OpenAIApi - axios parameter creator
- * @export
- */
-export declare const OpenAIApiAxiosParamCreator: (configuration?: Configuration) => {
- /**
- *
- * @summary Immediately cancel a fine-tune job.
- * @param {string} fineTuneId The ID of the fine-tune job to cancel
- * @param {*} [options] Override http request option.
- * @throws {RequiredError}
- */
- cancelFineTune: (fineTuneId: string, options?: AxiosRequestConfig) => Promise;
- /**
- *
- * @summary Answers the specified question using the provided documents and examples. The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions).
- * @param {CreateAnswerRequest} createAnswerRequest
- * @param {*} [options] Override http request option.
- * @deprecated
- * @throws {RequiredError}
- */
- createAnswer: (createAnswerRequest: CreateAnswerRequest, options?: AxiosRequestConfig) => Promise