Skip to content

Commit 9fa1d10

Browse files
committed
feat(api): add incomplete state (openai#1420)
1 parent 28ce3fd commit 9fa1d10

14 files changed

+120
-79
lines changed

.stats.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
 configured_endpoints: 64
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-47007cc1aa5bc7b74107a99b377925978a0bd376ed67bdae724e80d5d0b63d57.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-363dd904e5d6e65b3a323fc88e6b502fb23a6aa319be219273e3ee47c7530993.yml

src/openai/resources/batches.py

+12-6
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ def create(
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/chat/completions", "/v1/embeddings"],
+        endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
         input_file_id: str,
         metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -58,7 +58,9 @@ def create(
5858
is supported.
5959
6060
endpoint: The endpoint to be used for all requests in the batch. Currently
61-
`/v1/chat/completions` and `/v1/embeddings` are supported.
61+
`/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
62+
Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
63+
embedding inputs across all requests in the batch.
6264
6365
input_file_id: The ID of an uploaded file that contains requests for the new batch.
6466
@@ -67,7 +69,8 @@ def create(
6769
6870
Your input file must be formatted as a
6971
[JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput),
70-
and must be uploaded with the purpose `batch`.
72+
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
73+
requests, and can be up to 100 MB in size.
7174
7275
metadata: Optional custom metadata for the batch.
7376
@@ -228,7 +231,7 @@ async def create(
228231
self,
229232
*,
230233
completion_window: Literal["24h"],
231-
endpoint: Literal["/v1/chat/completions", "/v1/embeddings"],
234+
endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
232235
input_file_id: str,
233236
metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
234237
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -246,7 +249,9 @@ async def create(
246249
is supported.
247250
248251
endpoint: The endpoint to be used for all requests in the batch. Currently
249-
`/v1/chat/completions` and `/v1/embeddings` are supported.
252+
`/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
253+
Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
254+
embedding inputs across all requests in the batch.
250255
251256
input_file_id: The ID of an uploaded file that contains requests for the new batch.
252257
@@ -255,7 +260,8 @@ async def create(
255260
256261
Your input file must be formatted as a
257262
[JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput),
258-
and must be uploaded with the purpose `batch`.
263+
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
264+
requests, and can be up to 100 MB in size.
259265
260266
metadata: Optional custom metadata for the batch.
261267

src/openai/resources/beta/assistants.py

+12-8
Original file line numberDiff line numberDiff line change
@@ -110,8 +110,9 @@ def create(
110110
name: The name of the assistant. The maximum length is 256 characters.
111111
112112
response_format: Specifies the format that the model must output. Compatible with
113-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
114-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
113+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
114+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
115+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
115116
116117
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
117118
message the model generates is valid JSON.
@@ -254,8 +255,9 @@ def update(
254255
name: The name of the assistant. The maximum length is 256 characters.
255256
256257
response_format: Specifies the format that the model must output. Compatible with
257-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
258-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
258+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
259+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
260+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
259261
260262
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
261263
message the model generates is valid JSON.
@@ -497,8 +499,9 @@ async def create(
497499
name: The name of the assistant. The maximum length is 256 characters.
498500
499501
response_format: Specifies the format that the model must output. Compatible with
500-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
501-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
502+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
503+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
504+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
502505
503506
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
504507
message the model generates is valid JSON.
@@ -641,8 +644,9 @@ async def update(
641644
name: The name of the assistant. The maximum length is 256 characters.
642645
643646
response_format: Specifies the format that the model must output. Compatible with
644-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
645-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
647+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
648+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
649+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
646650
647651
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
648652
message the model generates is valid JSON.

src/openai/resources/beta/threads/runs/runs.py

+20-14
Original file line numberDiff line numberDiff line change
@@ -164,8 +164,9 @@ def create(
164164
assistant will be used.
165165
166166
response_format: Specifies the format that the model must output. Compatible with
167-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
168-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
167+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
168+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
169+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
169170
170171
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
171172
message the model generates is valid JSON.
@@ -314,8 +315,9 @@ def create(
314315
assistant will be used.
315316
316317
response_format: Specifies the format that the model must output. Compatible with
317-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
318-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
318+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
319+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
320+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
319321
320322
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
321323
message the model generates is valid JSON.
@@ -460,8 +462,9 @@ def create(
460462
assistant will be used.
461463
462464
response_format: Specifies the format that the model must output. Compatible with
463-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
464-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
465+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
466+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
467+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
465468
466469
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
467470
message the model generates is valid JSON.
@@ -1097,7 +1100,7 @@ def poll(
         if is_given(poll_interval_ms):
             extra_headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)

-        terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired"}
+        terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"}
         while True:
             response = self.with_raw_response.retrieve(
                 thread_id=thread_id,
@@ -1718,8 +1721,9 @@ async def create(
17181721
assistant will be used.
17191722
17201723
response_format: Specifies the format that the model must output. Compatible with
1721-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1722-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1724+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1725+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1726+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
17231727
17241728
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
17251729
message the model generates is valid JSON.
@@ -1868,8 +1872,9 @@ async def create(
18681872
assistant will be used.
18691873
18701874
response_format: Specifies the format that the model must output. Compatible with
1871-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1872-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1875+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1876+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1877+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
18731878
18741879
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
18751880
message the model generates is valid JSON.
@@ -2014,8 +2019,9 @@ async def create(
20142019
assistant will be used.
20152020
20162021
response_format: Specifies the format that the model must output. Compatible with
2017-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
2018-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
2022+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
2023+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
2024+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
20192025
20202026
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
20212027
message the model generates is valid JSON.
@@ -2653,7 +2659,7 @@ async def poll(
         if is_given(poll_interval_ms):
             extra_headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)

-        terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired"}
+        terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"}
         while True:
             response = await self.with_raw_response.retrieve(
                 thread_id=thread_id,

src/openai/resources/beta/threads/threads.py

+18-12
Original file line numberDiff line numberDiff line change
@@ -341,8 +341,9 @@ def create_and_run(
341341
assistant will be used.
342342
343343
response_format: Specifies the format that the model must output. Compatible with
344-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
345-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
344+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
345+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
346+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
346347
347348
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
348349
message the model generates is valid JSON.
@@ -490,8 +491,9 @@ def create_and_run(
490491
assistant will be used.
491492
492493
response_format: Specifies the format that the model must output. Compatible with
493-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
494-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
494+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
495+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
496+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
495497
496498
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
497499
message the model generates is valid JSON.
@@ -635,8 +637,9 @@ def create_and_run(
635637
assistant will be used.
636638
637639
response_format: Specifies the format that the model must output. Compatible with
638-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
639-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
640+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
641+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
642+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
640643
641644
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
642645
message the model generates is valid JSON.
@@ -1331,8 +1334,9 @@ async def create_and_run(
13311334
assistant will be used.
13321335
13331336
response_format: Specifies the format that the model must output. Compatible with
1334-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1335-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1337+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1338+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1339+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
13361340
13371341
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
13381342
message the model generates is valid JSON.
@@ -1480,8 +1484,9 @@ async def create_and_run(
14801484
assistant will be used.
14811485
14821486
response_format: Specifies the format that the model must output. Compatible with
1483-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1484-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1487+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1488+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1489+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
14851490
14861491
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
14871492
message the model generates is valid JSON.
@@ -1625,8 +1630,9 @@ async def create_and_run(
16251630
assistant will be used.
16261631
16271632
response_format: Specifies the format that the model must output. Compatible with
1628-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1629-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1633+
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1634+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1635+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
16301636
16311637
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
16321638
message the model generates is valid JSON.

src/openai/resources/files.py

+22-14
Original file line numberDiff line numberDiff line change
@@ -62,14 +62,18 @@ def create(
6262
) -> FileObject:
6363
"""Upload a file that can be used across various endpoints.
6464
65-
The size of all the
66-
files uploaded by one organization can be up to 100 GB.
65+
Individual files can be
66+
up to 512 MB, and the size of all files uploaded by one organization can be up
67+
to 100 GB.
6768
68-
The size of individual files can be a maximum of 512 MB or 2 million tokens for
69-
Assistants. See the
70-
[Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to
71-
learn more about the types of files supported. The Fine-tuning API only supports
72-
`.jsonl` files.
69+
The Assistants API supports files up to 2 million tokens and of specific file
70+
types. See the
71+
[Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for
72+
details.
73+
74+
The Fine-tuning API only supports `.jsonl` files.
75+
76+
The Batch API only supports `.jsonl` files up to 100 MB in size.
7377
7478
Please [contact us](https://help.openai.com/) if you need to increase these
7579
storage limits.
@@ -335,14 +339,18 @@ async def create(
335339
) -> FileObject:
336340
"""Upload a file that can be used across various endpoints.
337341
338-
The size of all the
339-
files uploaded by one organization can be up to 100 GB.
342+
Individual files can be
343+
up to 512 MB, and the size of all files uploaded by one organization can be up
344+
to 100 GB.
345+
346+
The Assistants API supports files up to 2 million tokens and of specific file
347+
types. See the
348+
[Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for
349+
details.
350+
351+
The Fine-tuning API only supports `.jsonl` files.
340352
341-
The size of individual files can be a maximum of 512 MB or 2 million tokens for
342-
Assistants. See the
343-
[Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to
344-
learn more about the types of files supported. The Fine-tuning API only supports
345-
`.jsonl` files.
353+
The Batch API only supports `.jsonl` files up to 100 MB in size.
346354
347355
Please [contact us](https://help.openai.com/) if you need to increase these
348356
storage limits.

src/openai/types/batch_create_params.py

+6-3
Original file line numberDiff line numberDiff line change
@@ -15,10 +15,12 @@ class BatchCreateParams(TypedDict, total=False):
     Currently only `24h` is supported.
     """

-    endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings"]]
+    endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
     """The endpoint to be used for all requests in the batch.

-    Currently `/v1/chat/completions` and `/v1/embeddings` are supported.
+    Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are
+    supported. Note that `/v1/embeddings` batches are also restricted to a maximum
+    of 50,000 embedding inputs across all requests in the batch.
     """

     input_file_id: Required[str]
2426
input_file_id: Required[str]
@@ -29,7 +31,8 @@ class BatchCreateParams(TypedDict, total=False):
2931
3032
Your input file must be formatted as a
3133
[JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput),
32-
and must be uploaded with the purpose `batch`.
34+
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
35+
requests, and can be up to 100 MB in size.
3336
"""
3437

3538
metadata: Optional[Dict[str, str]]

src/openai/types/beta/assistant.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -85,9 +85,9 @@ class Assistant(BaseModel):
8585
response_format: Optional[AssistantResponseFormatOption] = None
8686
"""Specifies the format that the model must output.
8787
88-
Compatible with
89-
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
90-
all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
88+
Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
89+
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
90+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
9191
9292
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
9393
message the model generates is valid JSON.

0 commit comments

Comments
 (0)