Commit ea4a68c

chore(docs): fix incorrect client var names (#955)
1 parent 38fa3f8 · commit ea4a68c

24 files changed, +182 -182 lines changed
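
Every change below is the same mechanical fix: a snippet declares the SDK instance as `client` but then calls methods on a stale `openai` variable that is never defined. A minimal sketch of the corrected pattern (a hedged illustration assembled from the diffs below, not code taken from the commit itself; the final `console.log` is mine):

```ts
import OpenAI from 'openai';

// The docs declare the SDK instance as `client`...
const client = new OpenAI();

async function main() {
  // ...so every call must go through `client`. The old snippets called
  // `openai.chat.completions.create(...)`, which fails with
  // `ReferenceError: openai is not defined` at runtime (or
  // `Cannot find name 'openai'` under TypeScript).
  const chatCompletion = await client.chat.completions.create({
    messages: [{ role: 'user', content: 'Say this is a test' }],
    model: 'gpt-3.5-turbo',
  });
  console.log(chatCompletion.choices[0]?.message.content);
}

main();
```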

Diff for: README.md

+16 -16
@@ -35,7 +35,7 @@ const client = new OpenAI({
 });

 async function main() {
-  const chatCompletion = await openai.chat.completions.create({
+  const chatCompletion = await client.chat.completions.create({
     messages: [{ role: 'user', content: 'Say this is a test' }],
     model: 'gpt-3.5-turbo',
   });
@@ -53,7 +53,7 @@ import OpenAI from 'openai';

 const client = new OpenAI();

-const stream = await openai.chat.completions.create({
+const stream = await client.chat.completions.create({
   messages: [{ role: 'user', content: 'Say this is a test' }],
   model: 'gpt-3.5-turbo',
   stream: true,
@@ -83,7 +83,7 @@ async function main() {
     messages: [{ role: 'user', content: 'Say this is a test' }],
     model: 'gpt-3.5-turbo',
   };
-  const chatCompletion: OpenAI.Chat.ChatCompletion = await openai.chat.completions.create(params);
+  const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params);
 }

 main();
@@ -108,20 +108,20 @@ import OpenAI, { toFile } from 'openai';
 const client = new OpenAI();

 // If you have access to Node `fs` we recommend using `fs.createReadStream()`:
-await openai.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });
+await client.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });

 // Or if you have the web `File` API you can pass a `File` instance:
-await openai.files.create({ file: new File(['my bytes'], 'input.jsonl'), purpose: 'fine-tune' });
+await client.files.create({ file: new File(['my bytes'], 'input.jsonl'), purpose: 'fine-tune' });

 // You can also pass a `fetch` `Response`:
-await openai.files.create({ file: await fetch('https://somesite/input.jsonl'), purpose: 'fine-tune' });
+await client.files.create({ file: await fetch('https://somesite/input.jsonl'), purpose: 'fine-tune' });

 // Finally, if none of the above are convenient, you can use our `toFile` helper:
-await openai.files.create({
+await client.files.create({
   file: await toFile(Buffer.from('my bytes'), 'input.jsonl'),
   purpose: 'fine-tune',
 });
-await openai.files.create({
+await client.files.create({
   file: await toFile(new Uint8Array([0, 1, 2]), 'input.jsonl'),
   purpose: 'fine-tune',
 });
@@ -136,7 +136,7 @@ a subclass of `APIError` will be thrown:
 <!-- prettier-ignore -->
 ```ts
 async function main() {
-  const job = await openai.fineTuning.jobs
+  const job = await client.fineTuning.jobs
     .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
     .catch(async (err) => {
       if (err instanceof OpenAI.APIError) {
@@ -181,7 +181,7 @@ const client = new OpenAI({
 });

 // Or, configure per-request:
-await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, {
+await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, {
   maxRetries: 5,
 });
 ```
@@ -198,7 +198,7 @@ const client = new OpenAI({
 });

 // Override per-request:
-await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, {
+await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, {
   timeout: 5 * 1000,
 });
 ```
@@ -216,7 +216,7 @@ You can use `for await … of` syntax to iterate through items across all pages:
 async function fetchAllFineTuningJobs(params) {
   const allFineTuningJobs = [];
   // Automatically fetches more pages as needed.
-  for await (const fineTuningJob of openai.fineTuning.jobs.list({ limit: 20 })) {
+  for await (const fineTuningJob of client.fineTuning.jobs.list({ limit: 20 })) {
     allFineTuningJobs.push(fineTuningJob);
   }
   return allFineTuningJobs;
@@ -226,7 +226,7 @@ async function fetchAllFineTuningJobs(params) {
 Alternatively, you can request a single page at a time:

 ```ts
-let page = await openai.fineTuning.jobs.list({ limit: 20 });
+let page = await client.fineTuning.jobs.list({ limit: 20 });
 for (const fineTuningJob of page.data) {
   console.log(fineTuningJob);
 }
@@ -250,13 +250,13 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
 ```ts
 const client = new OpenAI();

-const response = await openai.chat.completions
+const response = await client.chat.completions
   .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
   .asResponse();
 console.log(response.headers.get('X-My-Header'));
 console.log(response.statusText); // access the underlying Response object

-const { data: chatCompletion, response: raw } = await openai.chat.completions
+const { data: chatCompletion, response: raw } = await client.chat.completions
   .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
   .withResponse();
 console.log(raw.headers.get('X-My-Header'));
@@ -364,7 +364,7 @@ const client = new OpenAI({
 });

 // Override per-request:
-await openai.models.list({
+await client.models.list({
   httpAgent: new http.Agent({ keepAlive: false }),
 });
 ```
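
The error-handling hunk above (`@@ -136,7`) cuts off inside the `catch` callback. For context, a sketch of the complete pattern that hunk belongs to, shown with the corrected variable (reconstructed from the SDK's published README; the commented values are illustrative):

```ts
async function main() {
  const job = await client.fineTuning.jobs
    .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
    .catch(async (err) => {
      if (err instanceof OpenAI.APIError) {
        console.log(err.status); // e.g. 400
        console.log(err.name); // e.g. BadRequestError
        console.log(err.headers); // e.g. { server: 'nginx', ... }
      } else {
        throw err;
      }
    });
}

main();
```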

Diff for: tests/api-resources/audio/speech.test.ts

+2 -2
@@ -2,15 +2,15 @@

 import OpenAI from 'openai';

-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: 'My API Key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });

 describe('resource speech', () => {
   // binary tests are currently broken
   test.skip('create: required and optional params', async () => {
-    const response = await openai.audio.speech.create({
+    const response = await client.audio.speech.create({
       input: 'input',
       model: 'string',
       voice: 'alloy',

Diff for: tests/api-resources/audio/transcriptions.test.ts

+3 -3
@@ -3,14 +3,14 @@
 import OpenAI, { toFile } from 'openai';
 import { Response } from 'node-fetch';

-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: 'My API Key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });

 describe('resource transcriptions', () => {
   test('create: only required params', async () => {
-    const responsePromise = openai.audio.transcriptions.create({
+    const responsePromise = client.audio.transcriptions.create({
       file: await toFile(Buffer.from('# my file contents'), 'README.md'),
       model: 'whisper-1',
     });
@@ -24,7 +24,7 @@ describe('resource transcriptions', () => {
   });

   test('create: required and optional params', async () => {
-    const response = await openai.audio.transcriptions.create({
+    const response = await client.audio.transcriptions.create({
       file: await toFile(Buffer.from('# my file contents'), 'README.md'),
       model: 'whisper-1',
       language: 'language',

Diff for: tests/api-resources/audio/translations.test.ts

+3 -3
@@ -3,14 +3,14 @@
 import OpenAI, { toFile } from 'openai';
 import { Response } from 'node-fetch';

-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: 'My API Key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });

 describe('resource translations', () => {
   test('create: only required params', async () => {
-    const responsePromise = openai.audio.translations.create({
+    const responsePromise = client.audio.translations.create({
       file: await toFile(Buffer.from('# my file contents'), 'README.md'),
       model: 'whisper-1',
     });
@@ -24,7 +24,7 @@ describe('resource translations', () => {
   });

   test('create: required and optional params', async () => {
-    const response = await openai.audio.translations.create({
+    const response = await client.audio.translations.create({
       file: await toFile(Buffer.from('# my file contents'), 'README.md'),
       model: 'whisper-1',
       prompt: 'prompt',

Diff for: tests/api-resources/batches.test.ts

+10 -10
@@ -3,14 +3,14 @@
 import OpenAI from 'openai';
 import { Response } from 'node-fetch';

-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: 'My API Key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });

 describe('resource batches', () => {
   test('create: only required params', async () => {
-    const responsePromise = openai.batches.create({
+    const responsePromise = client.batches.create({
       completion_window: '24h',
       endpoint: '/v1/chat/completions',
       input_file_id: 'input_file_id',
@@ -25,7 +25,7 @@ describe('resource batches', () => {
   });

   test('create: required and optional params', async () => {
-    const response = await openai.batches.create({
+    const response = await client.batches.create({
       completion_window: '24h',
       endpoint: '/v1/chat/completions',
       input_file_id: 'input_file_id',
@@ -34,7 +34,7 @@ describe('resource batches', () => {
   });

   test('retrieve', async () => {
-    const responsePromise = openai.batches.retrieve('batch_id');
+    const responsePromise = client.batches.retrieve('batch_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -46,13 +46,13 @@ describe('resource batches', () => {

   test('retrieve: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(openai.batches.retrieve('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+    await expect(client.batches.retrieve('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
       OpenAI.NotFoundError,
     );
   });

   test('list', async () => {
-    const responsePromise = openai.batches.list();
+    const responsePromise = client.batches.list();
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -64,20 +64,20 @@ describe('resource batches', () => {

   test('list: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(openai.batches.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+    await expect(client.batches.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
       OpenAI.NotFoundError,
     );
   });

   test('list: request options and params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      openai.batches.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }),
+      client.batches.list({ after: 'after', limit: 0 }, { path: '/_stainless_unknown_path' }),
     ).rejects.toThrow(OpenAI.NotFoundError);
   });

   test('cancel', async () => {
-    const responsePromise = openai.batches.cancel('batch_id');
+    const responsePromise = client.batches.cancel('batch_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -89,7 +89,7 @@ describe('resource batches', () => {

   test('cancel: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(openai.batches.cancel('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+    await expect(client.batches.cancel('batch_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
       OpenAI.NotFoundError,
     );
   });

Diff for: tests/api-resources/beta/assistants.test.ts

+11 -11
@@ -3,14 +3,14 @@
 import OpenAI from 'openai';
 import { Response } from 'node-fetch';

-const openai = new OpenAI({
+const client = new OpenAI({
   apiKey: 'My API Key',
   baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
 });

 describe('resource assistants', () => {
   test('create: only required params', async () => {
-    const responsePromise = openai.beta.assistants.create({ model: 'gpt-4-turbo' });
+    const responsePromise = client.beta.assistants.create({ model: 'gpt-4-turbo' });
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -21,7 +21,7 @@ describe('resource assistants', () => {
   });

   test('create: required and optional params', async () => {
-    const response = await openai.beta.assistants.create({
+    const response = await client.beta.assistants.create({
       model: 'gpt-4-turbo',
       description: 'description',
       instructions: 'instructions',
@@ -44,7 +44,7 @@ describe('resource assistants', () => {
   });

   test('retrieve', async () => {
-    const responsePromise = openai.beta.assistants.retrieve('assistant_id');
+    const responsePromise = client.beta.assistants.retrieve('assistant_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -57,12 +57,12 @@ describe('resource assistants', () => {
   test('retrieve: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      openai.beta.assistants.retrieve('assistant_id', { path: '/_stainless_unknown_path' }),
+      client.beta.assistants.retrieve('assistant_id', { path: '/_stainless_unknown_path' }),
     ).rejects.toThrow(OpenAI.NotFoundError);
   });

   test('update', async () => {
-    const responsePromise = openai.beta.assistants.update('assistant_id', {});
+    const responsePromise = client.beta.assistants.update('assistant_id', {});
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -73,7 +73,7 @@ describe('resource assistants', () => {
   });

   test('list', async () => {
-    const responsePromise = openai.beta.assistants.list();
+    const responsePromise = client.beta.assistants.list();
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -85,23 +85,23 @@ describe('resource assistants', () => {

   test('list: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(openai.beta.assistants.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+    await expect(client.beta.assistants.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
       OpenAI.NotFoundError,
     );
   });

   test('list: request options and params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      openai.beta.assistants.list(
+      client.beta.assistants.list(
         { after: 'after', before: 'before', limit: 0, order: 'asc' },
         { path: '/_stainless_unknown_path' },
       ),
     ).rejects.toThrow(OpenAI.NotFoundError);
   });

   test('del', async () => {
-    const responsePromise = openai.beta.assistants.del('assistant_id');
+    const responsePromise = client.beta.assistants.del('assistant_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -114,7 +114,7 @@ describe('resource assistants', () => {
   test('del: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      openai.beta.assistants.del('assistant_id', { path: '/_stainless_unknown_path' }),
+      client.beta.assistants.del('assistant_id', { path: '/_stainless_unknown_path' }),
     ).rejects.toThrow(OpenAI.NotFoundError);
   });
 });
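
All five test files changed here share the same generated shape: one `client` pointed at a local mock server, plus per-method tests that resolve the returned promise twice, once as the raw `Response` and once as the parsed result. A condensed sketch of that pattern with the renamed variable (resource and ID borrowed from the batches diff above; the final assertion is mine):

```ts
import OpenAI from 'openai';
import { Response } from 'node-fetch';

const client = new OpenAI({
  apiKey: 'My API Key',
  // Tests run against a local mock server rather than the live API.
  baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});

describe('resource batches', () => {
  test('retrieve', async () => {
    // Hold the promise so it can be inspected in both forms.
    const responsePromise = client.batches.retrieve('batch_id');
    // First as the raw fetch Response...
    const rawResponse = await responsePromise.asResponse();
    expect(rawResponse).toBeInstanceOf(Response);
    // ...then awaited normally for the parsed result.
    const response = await responsePromise;
    expect(response).toBeDefined();
  });
});
```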
