
Commit 0c75bbd (parent 01f0188)

File tree: 6 files changed (+319 −1 lines)

Diff for: .stats.yml (+1 −1)

@@ -1 +1 @@
-configured_endpoints: 52
+configured_endpoints: 55

Diff for: api.md (+14)

@@ -337,3 +337,17 @@ Methods:
 
 - <code title="get /threads/{thread_id}/messages/{message_id}/files/{file_id}">client.beta.threads.messages.files.<a href="./src/resources/beta/threads/messages/files.ts">retrieve</a>(threadId, messageId, fileId) -> MessageFile</code>
 - <code title="get /threads/{thread_id}/messages/{message_id}/files">client.beta.threads.messages.files.<a href="./src/resources/beta/threads/messages/files.ts">list</a>(threadId, messageId, { ...params }) -> MessageFilesPage</code>
+
+# Batches
+
+Types:
+
+- <code><a href="./src/resources/batches.ts">Batch</a></code>
+- <code><a href="./src/resources/batches.ts">BatchError</a></code>
+- <code><a href="./src/resources/batches.ts">BatchRequestCounts</a></code>
+
+Methods:
+
+- <code title="post /batches">client.batches.<a href="./src/resources/batches.ts">create</a>({ ...params }) -> Batch</code>
+- <code title="get /batches/{batch_id}">client.batches.<a href="./src/resources/batches.ts">retrieve</a>(batchId) -> Batch</code>
+- <code title="post /batches/{batch_id}/cancel">client.batches.<a href="./src/resources/batches.ts">cancel</a>(batchId) -> Batch</code>

Diff for: src/index.ts (+7)

@@ -150,6 +150,7 @@ export class OpenAI extends Core.APIClient {
   models: API.Models = new API.Models(this);
   fineTuning: API.FineTuning = new API.FineTuning(this);
   beta: API.Beta = new API.Beta(this);
+  batches: API.Batches = new API.Batches(this);
 
   protected override defaultQuery(): Core.DefaultQuery | undefined {
     return this._options.defaultQuery;
@@ -285,6 +286,12 @@ export namespace OpenAI {
 
   export import Beta = API.Beta;
 
+  export import Batches = API.Batches;
+  export import Batch = API.Batch;
+  export import BatchError = API.BatchError;
+  export import BatchRequestCounts = API.BatchRequestCounts;
+  export import BatchCreateParams = API.BatchCreateParams;
+
   export import ErrorObject = API.ErrorObject;
   export import FunctionDefinition = API.FunctionDefinition;
   export import FunctionParameters = API.FunctionParameters;
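
Because the types are re-exported from the `OpenAI` namespace, callers can reference them without importing from the resource module directly. A small sketch:

```ts
import OpenAI from 'openai';

// Both the resource class and its types hang off the top-level namespace.
function summarize(counts: OpenAI.BatchRequestCounts): string {
  return `${counts.completed}/${counts.total} completed, ${counts.failed} failed`;
}
```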

Diff for: src/resources/batches.ts (+225, new file)

// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
import * as BatchesAPI from 'openai/resources/batches';

export class Batches extends APIResource {
  /**
   * Creates and executes a batch from an uploaded file of requests
   */
  create(body: BatchCreateParams, options?: Core.RequestOptions): Core.APIPromise<Batch> {
    return this._client.post('/batches', { body, ...options });
  }

  /**
   * Retrieves a batch.
   */
  retrieve(batchId: string, options?: Core.RequestOptions): Core.APIPromise<Batch> {
    return this._client.get(`/batches/${batchId}`, options);
  }

  /**
   * Cancels an in-progress batch.
   */
  cancel(batchId: string, options?: Core.RequestOptions): Core.APIPromise<Batch> {
    return this._client.post(`/batches/${batchId}/cancel`, options);
  }
}

export interface Batch {
  id: string;

  /**
   * The time frame within which the batch should be processed.
   */
  completion_window: string;

  /**
   * The Unix timestamp (in seconds) for when the batch was created.
   */
  created_at: string;

  /**
   * The OpenAI API endpoint used by the batch.
   */
  endpoint: string;

  /**
   * The ID of the input file for the batch.
   */
  input_file_id: string;

  /**
   * The object type, which is always `batch`.
   */
  object: 'batch';

  /**
   * The current status of the batch.
   */
  status:
    | 'validating'
    | 'failed'
    | 'in_progress'
    | 'finalizing'
    | 'completed'
    | 'expired'
    | 'cancelling'
    | 'cancelled';

  /**
   * The Unix timestamp (in seconds) for when the batch was cancelled.
   */
  cancelled_at?: string;

  /**
   * The Unix timestamp (in seconds) for when the batch started cancelling.
   */
  cancelling_at?: string;

  /**
   * The Unix timestamp (in seconds) for when the batch was completed.
   */
  completed_at?: string;

  /**
   * The ID of the file containing the outputs of requests with errors.
   */
  error_file_id?: string;

  errors?: Batch.Errors;

  /**
   * The Unix timestamp (in seconds) for when the batch expired.
   */
  expired_at?: string;

  /**
   * The Unix timestamp (in seconds) for when the batch will expire.
   */
  expires_at?: string;

  /**
   * The Unix timestamp (in seconds) for when the batch failed.
   */
  failed_at?: string;

  /**
   * The Unix timestamp (in seconds) for when the batch started finalizing.
   */
  finalizing_at?: string;

  /**
   * The Unix timestamp (in seconds) for when the batch started processing.
   */
  in_progress_at?: string;

  /**
   * Set of 16 key-value pairs that can be attached to an object. This can be useful
   * for storing additional information about the object in a structured format. Keys
   * can be a maximum of 64 characters long and values can be a maximum of 512
   * characters long.
   */
  metadata?: unknown | null;

  /**
   * The ID of the file containing the outputs of successfully executed requests.
   */
  output_file_id?: string;

  /**
   * The request counts for different statuses within the batch.
   */
  request_counts?: BatchRequestCounts;
}

export namespace Batch {
  export interface Errors {
    data?: Array<BatchesAPI.BatchError>;

    /**
     * The object type, which is always `list`.
     */
    object?: string;
  }
}

export interface BatchError {
  /**
   * An error code identifying the error type.
   */
  code?: string;

  /**
   * The line number of the input file where the error occurred, if applicable.
   */
  line?: number | null;

  /**
   * A human-readable message providing more details about the error.
   */
  message?: string;

  /**
   * The name of the parameter that caused the error, if applicable.
   */
  param?: string | null;
}

/**
 * The request counts for different statuses within the batch.
 */
export interface BatchRequestCounts {
  /**
   * Number of requests that have been completed successfully.
   */
  completed: number;

  /**
   * Number of requests that have failed.
   */
  failed: number;

  /**
   * Total number of requests in the batch.
   */
  total: number;
}

export interface BatchCreateParams {
  /**
   * The time frame within which the batch should be processed. Currently only `24h`
   * is supported.
   */
  completion_window: '24h';

  /**
   * The endpoint to be used for all requests in the batch. Currently only
   * `/v1/chat/completions` is supported.
   */
  endpoint: '/v1/chat/completions';

  /**
   * The ID of an uploaded file that contains requests for the new batch.
   *
   * See [upload file](https://platform.openai.com/docs/api-reference/files/create)
   * for how to upload a file.
   *
   * Your input file must be formatted as a JSONL file, and must be uploaded with the
   * purpose `batch`.
   */
  input_file_id: string;

  /**
   * Optional custom metadata for the batch.
   */
  metadata?: Record<string, string> | null;
}

export namespace Batches {
  export import Batch = BatchesAPI.Batch;
  export import BatchError = BatchesAPI.BatchError;
  export import BatchRequestCounts = BatchesAPI.BatchRequestCounts;
  export import BatchCreateParams = BatchesAPI.BatchCreateParams;
}
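
As the `BatchCreateParams` doc comments note, the input must be a JSONL file uploaded with purpose `batch`. A hypothetical end-to-end sketch combining the Files API with the new resource, assuming a local `requests.jsonl` (the file name and polling interval are illustrative, not part of this commit):

```ts
import fs from 'fs';
import OpenAI from 'openai';

const client = new OpenAI();

async function runBatch() {
  // Upload the JSONL request file with the `batch` purpose.
  const file = await client.files.create({
    file: fs.createReadStream('requests.jsonl'), // hypothetical local file
    purpose: 'batch',
  });

  let batch = await client.batches.create({
    completion_window: '24h',
    endpoint: '/v1/chat/completions',
    input_file_id: file.id,
  });

  // Poll until the batch reaches a terminal status (see the status union above).
  const terminal = new Set(['completed', 'failed', 'expired', 'cancelled']);
  while (!terminal.has(batch.status)) {
    await new Promise((resolve) => setTimeout(resolve, 60_000));
    batch = await client.batches.retrieve(batch.id);
  }

  if (batch.status === 'completed' && batch.output_file_id) {
    console.log('output file:', batch.output_file_id);
  }
}
```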

Diff for: src/resources/index.ts (+1)

@@ -3,6 +3,7 @@
 export * from './chat/index';
 export * from './shared';
 export { Audio } from './audio/audio';
+export { Batch, BatchError, BatchRequestCounts, BatchCreateParams, Batches } from './batches';
 export { Beta } from './beta/beta';
 export {
   Completion,

Diff for: tests/api-resources/batches.test.ts (+71, new file)

// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

import OpenAI from 'openai';
import { Response } from 'node-fetch';

const openai = new OpenAI({
  apiKey: 'My API Key',
  baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
});

describe('resource batches', () => {
  test('create: only required params', async () => {
    const responsePromise = openai.batches.create({
      completion_window: '24h',
      endpoint: '/v1/chat/completions',
      input_file_id: 'string',
    });
    const rawResponse = await responsePromise.asResponse();
    expect(rawResponse).toBeInstanceOf(Response);
    const response = await responsePromise;
    expect(response).not.toBeInstanceOf(Response);
    const dataAndResponse = await responsePromise.withResponse();
    expect(dataAndResponse.data).toBe(response);
    expect(dataAndResponse.response).toBe(rawResponse);
  });

  test('create: required and optional params', async () => {
    const response = await openai.batches.create({
      completion_window: '24h',
      endpoint: '/v1/chat/completions',
      input_file_id: 'string',
      metadata: { foo: 'string' },
    });
  });

  test('retrieve', async () => {
    const responsePromise = openai.batches.retrieve('string');
    const rawResponse = await responsePromise.asResponse();
    expect(rawResponse).toBeInstanceOf(Response);
    const response = await responsePromise;
    expect(response).not.toBeInstanceOf(Response);
    const dataAndResponse = await responsePromise.withResponse();
    expect(dataAndResponse.data).toBe(response);
    expect(dataAndResponse.response).toBe(rawResponse);
  });

  test('retrieve: request options instead of params are passed correctly', async () => {
    // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
    await expect(openai.batches.retrieve('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
      OpenAI.NotFoundError,
    );
  });

  test('cancel', async () => {
    const responsePromise = openai.batches.cancel('string');
    const rawResponse = await responsePromise.asResponse();
    expect(rawResponse).toBeInstanceOf(Response);
    const response = await responsePromise;
    expect(response).not.toBeInstanceOf(Response);
    const dataAndResponse = await responsePromise.withResponse();
    expect(dataAndResponse.data).toBe(response);
    expect(dataAndResponse.response).toBe(rawResponse);
  });

  test('cancel: request options instead of params are passed correctly', async () => {
    // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
    await expect(openai.batches.cancel('string', { path: '/_stainless_unknown_path' })).rejects.toThrow(
      OpenAI.NotFoundError,
    );
  });
});
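
These tests exercise the `APIPromise` helpers that every resource method returns: `asResponse()` for the raw HTTP response and `withResponse()` for both the parsed object and the response. The same pattern works in application code; a minimal sketch (the batch ID is a placeholder):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function inspectBatch() {
  // `.withResponse()` resolves to both the parsed Batch and the raw Response.
  const { data: batch, response } = await client.batches.retrieve('batch_abc123').withResponse();
  console.log(response.status, batch.status);
}
```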
