@@ -32,7 +32,7 @@ The full API of this library can be found in [api.md file](api.md) along with ma
32 32 ``` js
33 33 import OpenAI from 'openai';
34 34
35  -  const openai = new OpenAI({
35  +  const client = new OpenAI({
36 36   apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted
37 37 });
38 38
@@ -53,7 +53,7 @@ We provide support for streaming responses using Server Sent Events (SSE).
53 53 ``` ts
54 54 import OpenAI from 'openai';
55 55
56  -  const openai = new OpenAI();
56  +  const client = new OpenAI();
57 57
58 58 async function main() {
59 59   const stream = await openai.chat.completions.create({
@@ -80,7 +80,7 @@ This library includes TypeScript definitions for all request params and response
80 80 ``` ts
81 81 import OpenAI from 'openai';
82 82
83  -  const openai = new OpenAI({
83  +  const client = new OpenAI({
84 84   apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted
85 85 });
86 86
@@ -301,7 +301,7 @@ import fs from 'fs';
301 301 import fetch from 'node-fetch';
302 302 import OpenAI, { toFile } from 'openai';
303 303
304  -  const openai = new OpenAI();
304  +  const client = new OpenAI();
305 305
306 306 // If you have access to Node `fs` we recommend using `fs.createReadStream()`:
307 307 await openai.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });
@@ -399,7 +399,7 @@ You can use the `maxRetries` option to configure or disable this:
399 399 <!-- prettier-ignore -->
400 400 ``` js
401 401 // Configure the default for all requests:
402  -  const openai = new OpenAI({
402  +  const client = new OpenAI({
403 403   maxRetries: 0, // default is 2
404 404 });
405 405
@@ -416,7 +416,7 @@ Requests time out after 10 minutes by default. You can configure this with a `ti
416 416 <!-- prettier-ignore -->
417 417 ``` ts
418 418 // Configure the default for all requests:
419  -  const openai = new OpenAI({
419  +  const client = new OpenAI({
420 420   timeout: 20 * 1000, // 20 seconds (default is 10 minutes)
421 421 });
422 422
@@ -471,7 +471,7 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
471 471
472 472 <!-- prettier-ignore -->
473 473 ``` ts
474  -  const openai = new OpenAI();
474  +  const client = new OpenAI();
475 475
476 476 const response = await openai.chat.completions
477 477   .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
@@ -582,7 +582,7 @@ import http from 'http';
582 582 import { HttpsProxyAgent } from 'https-proxy-agent';
583 583
584 584 // Configure the default for all requests:
585  -  const openai = new OpenAI({
585  +  const client = new OpenAI({
586 586   httpAgent: new HttpsProxyAgent(process.env.PROXY_URL),
587 587 });
588 588
0 commit comments