@@ -35,7 +35,7 @@ const client = new OpenAI({
 });
 
 async function main() {
-  const chatCompletion = await openai.chat.completions.create({
+  const chatCompletion = await client.chat.completions.create({
     messages: [{ role: 'user', content: 'Say this is a test' }],
     model: 'gpt-3.5-turbo',
   });
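For context on the rename, `client` is the instance constructed just above the hunk; a minimal sketch of consuming the result (the reply text lives on the first element of the `choices` array):

```ts
import OpenAI from 'openai';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment by default

async function main() {
  const chatCompletion = await client.chat.completions.create({
    messages: [{ role: 'user', content: 'Say this is a test' }],
    model: 'gpt-3.5-turbo',
  });
  // The assistant's reply is on the first choice's message.
  console.log(chatCompletion.choices[0].message.content);
}

main();
```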
@@ -53,7 +53,7 @@ import OpenAI from 'openai';
 
 const client = new OpenAI();
 
-const stream = await openai.chat.completions.create({
+const stream = await client.chat.completions.create({
   messages: [{ role: 'user', content: 'Say this is a test' }],
   model: 'gpt-3.5-turbo',
   stream: true,
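The hunk ends before the stream is consumed; a self-contained sketch of the usual consumption loop, using the SDK's async-iterator support:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const stream = await client.chat.completions.create({
  messages: [{ role: 'user', content: 'Say this is a test' }],
  model: 'gpt-3.5-turbo',
  stream: true,
});
// Each chunk carries an incremental delta; `content` is absent on the
// initial role chunk and the final chunk, hence the fallback to ''.
for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
}
```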
@@ -83,7 +83,7 @@ async function main() {
     messages: [{ role: 'user', content: 'Say this is a test' }],
     model: 'gpt-3.5-turbo',
   };
-  const chatCompletion: OpenAI.Chat.ChatCompletion = await openai.chat.completions.create(params);
+  const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params);
 }
 
 main();
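As a usage note on the typed variant: annotating the params object up front means TypeScript checks `messages` and `model` before the request is made. A minimal sketch:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // A typo such as `mesages` or a wrong role literal fails to compile here.
  const params: OpenAI.Chat.ChatCompletionCreateParams = {
    messages: [{ role: 'user', content: 'Say this is a test' }],
    model: 'gpt-3.5-turbo',
  };
  const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params);
  console.log(chatCompletion.choices[0].message.content);
}

main();
```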
@@ -108,20 +108,20 @@ import OpenAI, { toFile } from 'openai';
 const client = new OpenAI();
 
 // If you have access to Node `fs` we recommend using `fs.createReadStream()`:
-await openai.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });
+await client.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });
 
 // Or if you have the web `File` API you can pass a `File` instance:
-await openai.files.create({ file: new File(['my bytes'], 'input.jsonl'), purpose: 'fine-tune' });
+await client.files.create({ file: new File(['my bytes'], 'input.jsonl'), purpose: 'fine-tune' });
 
 // You can also pass a `fetch` `Response`:
-await openai.files.create({ file: await fetch('https://somesite/input.jsonl'), purpose: 'fine-tune' });
+await client.files.create({ file: await fetch('https://somesite/input.jsonl'), purpose: 'fine-tune' });
 
 // Finally, if none of the above are convenient, you can use our `toFile` helper:
-await openai.files.create({
+await client.files.create({
   file: await toFile(Buffer.from('my bytes'), 'input.jsonl'),
   purpose: 'fine-tune',
 });
-await openai.files.create({
+await client.files.create({
   file: await toFile(new Uint8Array([0, 1, 2]), 'input.jsonl'),
   purpose: 'fine-tune',
 });
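Whichever upload shape is used, the call resolves to a file object whose `id` is what other endpoints reference; a small sketch tying this to the fine-tuning example further down (the `console.log` usage is illustrative):

```ts
import fs from 'fs';
import OpenAI from 'openai';

const client = new OpenAI();

const file = await client.files.create({
  file: fs.createReadStream('input.jsonl'),
  purpose: 'fine-tune',
});
// The returned id is what you pass as `training_file` when creating a job.
console.log(file.id);
```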
@@ -136,7 +136,7 @@ a subclass of `APIError` will be thrown:
 <!-- prettier-ignore -->
 ```ts
 async function main() {
-  const job = await openai.fineTuning.jobs
+  const job = await client.fineTuning.jobs
     .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
     .catch(async (err) => {
       if (err instanceof OpenAI.APIError) {
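The hunk cuts off inside the `catch`; for reference, a sketch of the full handler in the same pattern, using the `status`, `name`, and `headers` fields that `APIError` exposes:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const job = await client.fineTuning.jobs
    .create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
    .catch(async (err) => {
      if (err instanceof OpenAI.APIError) {
        console.log(err.status);  // HTTP status code, e.g. 400
        console.log(err.name);    // error class name, e.g. BadRequestError
        console.log(err.headers); // raw response headers
      } else {
        throw err; // re-throw anything that isn't an API error
      }
    });
}

main();
```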
@@ -181,7 +181,7 @@ const client = new OpenAI({
 });
 
 // Or, configure per-request:
-await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, {
+await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, {
   maxRetries: 5,
 });
 ```
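The elided top of this hunk configures the client-wide default that the per-request option overrides; a sketch of that configuration (the value shown is illustrative):

```ts
import OpenAI from 'openai';

// Configure the default for all requests; individual calls can still pass
// `maxRetries` as shown in the hunk above.
const client = new OpenAI({
  maxRetries: 0, // e.g. disable retries entirely
});
```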
@@ -198,7 +198,7 @@ const client = new OpenAI({
 });
 
 // Override per-request:
-await openai.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, {
+await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, {
   timeout: 5 * 1000,
 });
 ```
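Likewise, timeouts can be set once on the client instead of per request; a sketch with an illustrative value:

```ts
import OpenAI from 'openai';

// Configure the default timeout for all requests (milliseconds).
const client = new OpenAI({
  timeout: 20 * 1000, // 20 seconds
});
```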
@@ -216,7 +216,7 @@ You can use `for await … of` syntax to iterate through items across all pages:
 async function fetchAllFineTuningJobs(params) {
   const allFineTuningJobs = [];
   // Automatically fetches more pages as needed.
-  for await (const fineTuningJob of openai.fineTuning.jobs.list({ limit: 20 })) {
+  for await (const fineTuningJob of client.fineTuning.jobs.list({ limit: 20 })) {
     allFineTuningJobs.push(fineTuningJob);
   }
   return allFineTuningJobs;
@@ -226,7 +226,7 @@ async function fetchAllFineTuningJobs(params) {
 Alternatively, you can request a single page at a time:
 
 ```ts
-let page = await openai.fineTuning.jobs.list({ limit: 20 });
+let page = await client.fineTuning.jobs.list({ limit: 20 });
 for (const fineTuningJob of page.data) {
   console.log(fineTuningJob);
 }
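When paging manually, the SDK's page objects carry cursor helpers; a self-contained sketch continuing the example above with `hasNextPage()`/`getNextPage()`:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

let page = await client.fineTuning.jobs.list({ limit: 20 });
// Walk the remaining pages explicitly instead of relying on `for await`.
while (page.hasNextPage()) {
  page = await page.getNextPage();
  for (const fineTuningJob of page.data) {
    console.log(fineTuningJob);
  }
}
```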
@@ -250,13 +250,13 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
 ```ts
 const client = new OpenAI();
 
-const response = await openai.chat.completions
+const response = await client.chat.completions
   .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
   .asResponse();
 console.log(response.headers.get('X-My-Header'));
 console.log(response.statusText); // access the underlying Response object
 
-const { data: chatCompletion, response: raw } = await openai.chat.completions
+const { data: chatCompletion, response: raw } = await client.chat.completions
   .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
   .withResponse();
 console.log(raw.headers.get('X-My-Header'));
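As a usage note, `withResponse()` yields both halves at once, so the parsed completion and the raw fetch `Response` can be inspected side by side; a short sketch:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const { data: chatCompletion, response: raw } = await client.chat.completions
  .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
  .withResponse();
// `raw` is the underlying fetch Response; `data` is the parsed SDK type.
console.log(raw.status);
console.log(chatCompletion.choices[0].message.content);
```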
@@ -364,7 +364,7 @@ const client = new OpenAI({
 });
 
 // Override per-request:
-await openai.models.list({
+await client.models.list({
   httpAgent: new http.Agent({ keepAlive: false }),
 });
 ```
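`httpAgent` also accepts proxy agents; a sketch assuming the third-party `https-proxy-agent` package and a placeholder proxy URL:

```ts
import { HttpsProxyAgent } from 'https-proxy-agent';
import OpenAI from 'openai';

// Configure the default agent for all requests; the URL below is a placeholder.
const client = new OpenAI({
  httpAgent: new HttpsProxyAgent('http://proxy.example.com:8080'),
});
```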