
Commit f696b2a

stainless-app[bot] authored and committed
chore(docs): fix incorrect client var names (#955)
1 parent 83c1d17 commit f696b2a

24 files changed: +182 −182
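In the README, snippets declare `const client = new OpenAI(...)` but then call methods on `openai`, a variable that is never defined — run verbatim, they would throw a `ReferenceError`. In the test files the `const openai` fixture still worked but is renamed to `client` for consistency. A minimal sketch of the corrected README shape (prompt and model values come from the diff):

```ts
import OpenAI from 'openai';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

async function main() {
  // Before this fix the docs read `openai.chat.completions.create(...)`,
  // which throws `ReferenceError: openai is not defined` — only `client`
  // is in scope.
  const chatCompletion = await client.chat.completions.create({
    messages: [{ role: 'user', content: 'Say this is a test' }],
    model: 'gpt-3.5-turbo',
  });
  console.log(chatCompletion.choices[0].message.content);
}

main();
```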

Diff for: README.md

+16 −16
````diff
@@ -37,7 +37,7 @@ const client = new OpenAI({
 });

 async function main() {
-  const chatCompletion = await openai.chat.completions.create({
+  const chatCompletion = await client.chat.completions.create({
     messages: [{ role: 'user', content: 'Say this is a test' }],
     model: 'gpt-3.5-turbo',
   });
@@ -56,7 +56,7 @@ import OpenAI from 'openai';
 const client = new OpenAI();

 async function main() {
-  const stream = await openai.chat.completions.create({
+  const stream = await client.chat.completions.create({
     model: 'gpt-4',
     messages: [{ role: 'user', content: 'Say this is a test' }],
     stream: true,
````
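The streaming hunk cuts off right after the `create` call. For context, a sketch of how the corrected snippet would consume the stream — with `stream: true` the SDK returns an async iterable of chunks (the `delta` field shape assumed here is the standard chat-completions chunk format):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const stream = await client.chat.completions.create({
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Say this is a test' }],
    stream: true,
  });
  for await (const chunk of stream) {
    // each chunk carries an incremental delta, not a full message
    process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
  }
}

main();
```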
````diff
@@ -89,7 +89,7 @@ async function main() {
     messages: [{ role: 'user', content: 'Say this is a test' }],
     model: 'gpt-3.5-turbo',
   };
-  const chatCompletion: OpenAI.Chat.ChatCompletion = await openai.chat.completions.create(params);
+  const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params);
 }

 main();
````
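This hunk starts mid-function, so the `params` declaration itself is not visible; presumably it is typed with the SDK's request type, along these lines:

```ts
const params: OpenAI.Chat.ChatCompletionCreateParams = {
  messages: [{ role: 'user', content: 'Say this is a test' }],
  model: 'gpt-3.5-turbo',
};
const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params);
```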
````diff
@@ -304,20 +304,20 @@ import OpenAI, { toFile } from 'openai';
 const client = new OpenAI();

 // If you have access to Node `fs` we recommend using `fs.createReadStream()`:
-await openai.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });
+await client.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });

 // Or if you have the web `File` API you can pass a `File` instance:
-await openai.files.create({ file: new File(['my bytes'], 'input.jsonl'), purpose: 'fine-tune' });
+await client.files.create({ file: new File(['my bytes'], 'input.jsonl'), purpose: 'fine-tune' });

 // You can also pass a `fetch` `Response`:
-await openai.files.create({ file: await fetch('https://somesite/input.jsonl'), purpose: 'fine-tune' });
+await client.files.create({ file: await fetch('https://somesite/input.jsonl'), purpose: 'fine-tune' });

 // Finally, if none of the above are convenient, you can use our `toFile` helper:
-await openai.files.create({
+await client.files.create({
   file: await toFile(Buffer.from('my bytes'), 'input.jsonl'),
   purpose: 'fine-tune',
 });
-await openai.files.create({
+await client.files.create({
   file: await toFile(new Uint8Array([0, 1, 2]), 'input.jsonl'),
   purpose: 'fine-tune',
 });
@@ -332,7 +332,7 @@ a subclass of `APIError` will be thrown:
 <!-- prettier-ignore -->
 ```ts
 async function main() {
-  const job = await openai.fineTuning.jobs
+  const job = await client.fineTuning.jobs
     .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
     .catch(async (err) => {
       if (err instanceof OpenAI.APIError) {
````
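The error-handling hunk truncates inside the `.catch` callback. A sketch of the complete pattern it illustrates, assuming the SDK's documented `APIError` fields (`status`, `name`, `headers`); anything that is not an API error should be rethrown:

```ts
const job = await client.fineTuning.jobs
  .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
  .catch(async (err) => {
    if (err instanceof OpenAI.APIError) {
      console.log(err.status); // e.g. 400
      console.log(err.name); // e.g. 'BadRequestError'
      console.log(err.headers); // e.g. { server: 'nginx', ... }
    } else {
      throw err; // not an API error — let it propagate
    }
  });
```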
````diff
@@ -404,7 +404,7 @@ const client = new OpenAI({
 });

 // Or, configure per-request:
-await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, {
+await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, {
   maxRetries: 5,
 });
 ```
@@ -421,7 +421,7 @@ const client = new OpenAI({
 });

 // Override per-request:
-await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, {
+await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, {
   timeout: 5 * 1000,
 });
 ```
@@ -439,7 +439,7 @@ You can use `for await … of` syntax to iterate through items across all pages:
 async function fetchAllFineTuningJobs(params) {
   const allFineTuningJobs = [];
   // Automatically fetches more pages as needed.
-  for await (const fineTuningJob of openai.fineTuning.jobs.list({ limit: 20 })) {
+  for await (const fineTuningJob of client.fineTuning.jobs.list({ limit: 20 })) {
     allFineTuningJobs.push(fineTuningJob);
   }
   return allFineTuningJobs;
@@ -449,7 +449,7 @@ async function fetchAllFineTuningJobs(params) {
 Alternatively, you can make request a single page at a time:

 ```ts
-let page = await openai.fineTuning.jobs.list({ limit: 20 });
+let page = await client.fineTuning.jobs.list({ limit: 20 });
 for (const fineTuningJob of page.data) {
   console.log(fineTuningJob);
 }
````
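Besides the `for await` and single-page forms shown in the hunks above, the returned page object also exposes cursor helpers; a sketch assuming the SDK's `hasNextPage()` and `getNextPage()` methods:

```ts
let page = await client.fineTuning.jobs.list({ limit: 20 });
for (const fineTuningJob of page.data) {
  console.log(fineTuningJob);
}
// walk the remaining pages explicitly
while (page.hasNextPage()) {
  page = await page.getNextPage();
  for (const fineTuningJob of page.data) {
    console.log(fineTuningJob);
  }
}
```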
````diff
@@ -473,13 +473,13 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
 ```ts
 const client = new OpenAI();

-const response = await openai.chat.completions
+const response = await client.chat.completions
   .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
   .asResponse();
 console.log(response.headers.get('X-My-Header'));
 console.log(response.statusText); // access the underlying Response object

-const { data: chatCompletion, response: raw } = await openai.chat.completions
+const { data: chatCompletion, response: raw } = await client.chat.completions
   .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
   .withResponse();
 console.log(raw.headers.get('X-My-Header'));
@@ -587,7 +587,7 @@ const client = new OpenAI({
 });

 // Override per-request:
-await openai.models.list({
+await client.models.list({
   httpAgent: new http.Agent({ keepAlive: false }),
 });
 ```
````

Diff for: tests/api-resources/audio/speech.test.ts

+2 −2
````diff
@@ -2,15 +2,15 @@

 import OpenAI from 'openai';

-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: 'My API Key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });

 describe('resource speech', () => {
   // binary tests are currently broken
   test.skip('create: required and optional params', async () => {
-    const response = await openai.audio.speech.create({
+    const response = await client.audio.speech.create({
       input: 'input',
       model: 'string',
       voice: 'alloy',
````

Diff for: tests/api-resources/audio/transcriptions.test.ts

+3 −3
````diff
@@ -3,14 +3,14 @@
 import OpenAI, { toFile } from 'openai';
 import { Response } from 'node-fetch';

-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: 'My API Key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });

 describe('resource transcriptions', () => {
   test('create: only required params', async () => {
-    const responsePromise = openai.audio.transcriptions.create({
+    const responsePromise = client.audio.transcriptions.create({
       file: await toFile(Buffer.from('# my file contents'), 'README.md'),
       model: 'whisper-1',
     });
@@ -24,7 +24,7 @@ describe('resource transcriptions', () => {
   });

   test('create: required and optional params', async () => {
-    const response = await openai.audio.transcriptions.create({
+    const response = await client.audio.transcriptions.create({
       file: await toFile(Buffer.from('# my file contents'), 'README.md'),
       model: 'whisper-1',
       language: 'language',
````

Diff for: tests/api-resources/audio/translations.test.ts

+3 −3
````diff
@@ -3,14 +3,14 @@
 import OpenAI, { toFile } from 'openai';
 import { Response } from 'node-fetch';

-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: 'My API Key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });

 describe('resource translations', () => {
   test('create: only required params', async () => {
-    const responsePromise = openai.audio.translations.create({
+    const responsePromise = client.audio.translations.create({
       file: await toFile(Buffer.from('# my file contents'), 'README.md'),
       model: 'whisper-1',
     });
@@ -24,7 +24,7 @@ describe('resource translations', () => {
   });

   test('create: required and optional params', async () => {
-    const response = await openai.audio.translations.create({
+    const response = await client.audio.translations.create({
       file: await toFile(Buffer.from('# my file contents'), 'README.md'),
       model: 'whisper-1',
       prompt: 'prompt',
````

Diff for: tests/api-resources/batches.test.ts

+10 −10
````diff
@@ -3,14 +3,14 @@
 import OpenAI from 'openai';
 import { Response } from 'node-fetch';

-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: 'My API Key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });

 describe('resource batches', () => {
   test('create: only required params', async () => {
-    const responsePromise = openai.batches.create({
+    const responsePromise = client.batches.create({
       completion_window: '24h',
       endpoint: '/v1/chat/completions',
       input_file_id: 'input_file_id',
@@ -25,7 +25,7 @@ describe('resource batches', () => {
   });

   test('create: required and optional params', async () => {
-    const response = await openai.batches.create({
+    const response = await client.batches.create({
       completion_window: '24h',
       endpoint: '/v1/chat/completions',
       input_file_id: 'input_file_id',
@@ -34,7 +34,7 @@ describe('resource batches', () => {
   });

   test('retrieve', async () => {
-    const responsePromise = openai.batches.retrieve('batch_id');
+    const responsePromise = client.batches.retrieve('batch_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -46,13 +46,13 @@ describe('resource batches', () => {

   test('retrieve: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(openai.batches.retrieve('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+    await expect(client.batches.retrieve('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
       OpenAI.NotFoundError,
     );
   });

   test('list', async () => {
-    const responsePromise = openai.batches.list();
+    const responsePromise = client.batches.list();
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -64,20 +64,20 @@ describe('resource batches', () => {

   test('list: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(openai.batches.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+    await expect(client.batches.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
       OpenAI.NotFoundError,
     );
   });

   test('list: request options and params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      openai.batches.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }),
+      client.batches.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }),
     ).rejects.toThrow(OpenAI.NotFoundError);
   });

   test('cancel', async () => {
-    const responsePromise = openai.batches.cancel('batch_id');
+    const responsePromise = client.batches.cancel('batch_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -89,7 +89,7 @@ describe('resource batches', () => {

   test('cancel: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(openai.batches.cancel('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+    await expect(client.batches.cancel('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
       OpenAI.NotFoundError,
     );
   });
````
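Every test-file hunk in this commit truncates mid-test, but the shared shape is visible in the `retrieve` hunk above: one promise can be awaited either as a raw `Response` or as the parsed result. A sketch of that pattern in isolation (the assertions after the cut are elided; the `127.0.0.1:4010` default presumably points at a local mock server):

```ts
import OpenAI from 'openai';
import { Response } from 'node-fetch';

const client = new OpenAI({
  apiKey: 'My API Key',
  baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});

test('retrieve', async () => {
  const responsePromise = client.batches.retrieve('batch_id');
  // awaiting .asResponse() yields the raw fetch Response…
  const rawResponse = await responsePromise.asResponse();
  expect(rawResponse).toBeInstanceOf(Response);
  // …while awaiting the promise itself yields the parsed object
  const response = await responsePromise;
  // (further assertions elided in the diff)
});
```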

Diff for: tests/api-resources/beta/assistants.test.ts

+11 −11
````diff
@@ -3,14 +3,14 @@
 import OpenAI from 'openai';
 import { Response } from 'node-fetch';

-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: 'My API Key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });

 describe('resource assistants', () => {
   test('create: only required params', async () => {
-    const responsePromise = openai.beta.assistants.create({ model: 'gpt-4-turbo' });
+    const responsePromise = client.beta.assistants.create({ model: 'gpt-4-turbo' });
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -21,7 +21,7 @@ describe('resource assistants', () => {
   });

   test('create: required and optional params', async () => {
-    const response = await openai.beta.assistants.create({
+    const response = await client.beta.assistants.create({
       model: 'gpt-4-turbo',
       description: 'description',
       instructions: 'instructions',
@@ -44,7 +44,7 @@ describe('resource assistants', () => {
   });

   test('retrieve', async () => {
-    const responsePromise = openai.beta.assistants.retrieve('assistant_id');
+    const responsePromise = client.beta.assistants.retrieve('assistant_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -57,12 +57,12 @@ describe('resource assistants', () => {
   test('retrieve: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      openai.beta.assistants.retrieve('assistant_id', { path: '/_stainless_unknown_path' }),
+      client.beta.assistants.retrieve('assistant_id', { path: '/_stainless_unknown_path' }),
     ).rejects.toThrow(OpenAI.NotFoundError);
   });

   test('update', async () => {
-    const responsePromise = openai.beta.assistants.update('assistant_id', {});
+    const responsePromise = client.beta.assistants.update('assistant_id', {});
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -73,7 +73,7 @@ describe('resource assistants', () => {
   });

   test('list', async () => {
-    const responsePromise = openai.beta.assistants.list();
+    const responsePromise = client.beta.assistants.list();
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -85,23 +85,23 @@ describe('resource assistants', () => {

   test('list: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(openai.beta.assistants.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+    await expect(client.beta.assistants.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
       OpenAI.NotFoundError,
     );
   });

   test('list: request options and params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      openai.beta.assistants.list(
+      client.beta.assistants.list(
        { after: 'after', before: 'before', limit: 0, order: 'asc' },
        { path: '/_stainless_unknown_path' },
      ),
    ).rejects.toThrow(OpenAI.NotFoundError);
   });

   test('del', async () => {
-    const responsePromise = openai.beta.assistants.del('assistant_id');
+    const responsePromise = client.beta.assistants.del('assistant_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -114,7 +114,7 @@ describe('resource assistants', () => {
   test('del: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      openai.beta.assistants.del('assistant_id', { path: '/_stainless_unknown_path' }),
+      client.beta.assistants.del('assistant_id', { path: '/_stainless_unknown_path' }),
     ).rejects.toThrow(OpenAI.NotFoundError);
   });
 });
````
