Skip to content

Commit f588695

Browse files
stainless-app[bot], pakrym-oai, RobertCraigie, kwhinnery-openai, fmegen
authored
release: 1.82.0 (#2372)
* Add background streaming
* -m rest of the implementation
* docs(readme): fix async example
* docs(readme): another async example fix
* fix(azure): mark images/edits as a deployment endpoint #2371
* feat(api): new streaming helpers for background responses
* release: 1.82.0

---------

Co-authored-by: pakrym-oai <[email protected]>
Co-authored-by: Robert Craigie <[email protected]>
Co-authored-by: Kevin Whinnery <[email protected]>
Co-authored-by: Friedel van Megen <[email protected]>
Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
1 parent 71058dd commit f588695

31 files changed

+726
-239
lines changed

.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
".": "1.81.0"
2+
".": "1.82.0"
33
}

.stats.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 111
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6af14840a810139bf407013167ce1c8fb21b6ef8eb0cc3db58b51af7d52c4b5a.yml
3-
openapi_spec_hash: 3241bde6b273cfec0035e522bd07985d
4-
config_hash: 7367b68a4e7db36885c1a886f57b17f6
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-fc64d7c2c8f51f750813375356c3f3fdfc7fc1b1b34f19c20a5410279d445d37.yml
3+
openapi_spec_hash: 618285fc70199ee32b9ebe4bf72f7e4c
4+
config_hash: c497f6b750cc89c0bf2eefc0bc839c70

CHANGELOG.md

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,24 @@
11
# Changelog
22

3+
## 1.82.0 (2025-05-22)
4+
5+
Full Changelog: [v1.81.0...v1.82.0](https://github.com/openai/openai-python/compare/v1.81.0...v1.82.0)
6+
7+
### Features
8+
9+
* **api:** new streaming helpers for background responses ([2a65d4d](https://github.com/openai/openai-python/commit/2a65d4de0aaba7801edd0df10f225530fd4969bd))
10+
11+
12+
### Bug Fixes
13+
14+
* **azure:** mark images/edits as a deployment endpoint [#2371](https://github.com/openai/openai-python/issues/2371) ([5d1d5b4](https://github.com/openai/openai-python/commit/5d1d5b4b6072afe9fd7909b1a36014c8c11c1ad6))
15+
16+
17+
### Documentation
18+
19+
* **readme:** another async example fix ([9ec8289](https://github.com/openai/openai-python/commit/9ec8289041f395805c67efd97847480f84eb9dac))
20+
* **readme:** fix async example ([37d0b25](https://github.com/openai/openai-python/commit/37d0b25b6e82cd381e5d1aa6e28f1a1311d02353))
21+
322
## 1.81.0 (2025-05-21)
423

524
Full Changelog: [v1.80.0...v1.81.0](https://github.com/openai/openai-python/compare/v1.80.0...v1.81.0)

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -174,13 +174,13 @@ client = AsyncOpenAI()
174174

175175

176176
async def main():
177-
stream = client.responses.create(
177+
stream = await client.responses.create(
178178
model="gpt-4o",
179179
input="Write a one-sentence bedtime story about a unicorn.",
180180
stream=True,
181181
)
182182

183-
for event in stream:
183+
async for event in stream:
184184
print(event)
185185

186186

api.md

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -764,7 +764,6 @@ from openai.types.responses import (
764764
ResponseRefusalDoneEvent,
765765
ResponseStatus,
766766
ResponseStreamEvent,
767-
ResponseTextAnnotationDeltaEvent,
768767
ResponseTextConfig,
769768
ResponseTextDeltaEvent,
770769
ResponseTextDoneEvent,

examples/responses/background.py

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
from typing import List
2+
3+
import rich
4+
from pydantic import BaseModel
5+
6+
from openai import OpenAI
7+
8+
9+
class Step(BaseModel):
10+
explanation: str
11+
output: str
12+
13+
14+
class MathResponse(BaseModel):
15+
steps: List[Step]
16+
final_answer: str
17+
18+
19+
client = OpenAI()
20+
id = None
21+
22+
with client.responses.create(
23+
input="solve 8x + 31 = 2",
24+
model="gpt-4o-2024-08-06",
25+
background=True,
26+
stream=True,
27+
) as stream:
28+
for event in stream:
29+
if event.type == "response.created":
30+
id = event.response.id
31+
if "output_text" in event.type:
32+
rich.print(event)
33+
if event.sequence_number == 10:
34+
break
35+
36+
print("Interrupted. Continuing...")
37+
38+
assert id is not None
39+
with client.responses.retrieve(
40+
response_id=id,
41+
stream=True,
42+
starting_after=10,
43+
) as stream:
44+
for event in stream:
45+
if "output_text" in event.type:
46+
rich.print(event)
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
import asyncio
2+
from typing import List
3+
4+
import rich
5+
from pydantic import BaseModel
6+
7+
from openai._client import AsyncOpenAI
8+
9+
10+
class Step(BaseModel):
11+
explanation: str
12+
output: str
13+
14+
15+
class MathResponse(BaseModel):
16+
steps: List[Step]
17+
final_answer: str
18+
19+
20+
async def main() -> None:
21+
client = AsyncOpenAI()
22+
id = None
23+
24+
async with await client.responses.create(
25+
input="solve 8x + 31 = 2",
26+
model="gpt-4o-2024-08-06",
27+
background=True,
28+
stream=True,
29+
) as stream:
30+
async for event in stream:
31+
if event.type == "response.created":
32+
id = event.response.id
33+
if "output_text" in event.type:
34+
rich.print(event)
35+
if event.sequence_number == 10:
36+
break
37+
38+
print("Interrupted. Continuing...")
39+
40+
assert id is not None
41+
async with await client.responses.retrieve(
42+
response_id=id,
43+
stream=True,
44+
starting_after=10,
45+
) as stream:
46+
async for event in stream:
47+
if "output_text" in event.type:
48+
rich.print(event)
49+
50+
51+
if __name__ == "__main__":
52+
asyncio.run(main())
Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
#!/usr/bin/env -S rye run python
2+
from typing import List
3+
4+
import rich
5+
from pydantic import BaseModel
6+
7+
from openai import OpenAI
8+
9+
10+
class Step(BaseModel):
11+
explanation: str
12+
output: str
13+
14+
15+
class MathResponse(BaseModel):
16+
steps: List[Step]
17+
final_answer: str
18+
19+
20+
client = OpenAI()
21+
id = None
22+
with client.responses.stream(
23+
input="solve 8x + 31 = 2",
24+
model="gpt-4o-2024-08-06",
25+
text_format=MathResponse,
26+
background=True,
27+
) as stream:
28+
for event in stream:
29+
if event.type == "response.created":
30+
id = event.response.id
31+
if "output_text" in event.type:
32+
rich.print(event)
33+
if event.sequence_number == 10:
34+
break
35+
36+
print("Interrupted. Continuing...")
37+
38+
assert id is not None
39+
with client.responses.stream(
40+
response_id=id,
41+
starting_after=10,
42+
text_format=MathResponse,
43+
) as stream:
44+
for event in stream:
45+
if "output_text" in event.type:
46+
rich.print(event)
47+
48+
rich.print(stream.get_final_response())
Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
import asyncio
2+
from typing import List
3+
4+
import rich
5+
from pydantic import BaseModel
6+
7+
from openai import AsyncOpenAI
8+
9+
10+
class Step(BaseModel):
11+
explanation: str
12+
output: str
13+
14+
15+
class MathResponse(BaseModel):
16+
steps: List[Step]
17+
final_answer: str
18+
19+
20+
async def main() -> None:
21+
client = AsyncOpenAI()
22+
id = None
23+
async with client.responses.stream(
24+
input="solve 8x + 31 = 2",
25+
model="gpt-4o-2024-08-06",
26+
text_format=MathResponse,
27+
background=True,
28+
) as stream:
29+
async for event in stream:
30+
if event.type == "response.created":
31+
id = event.response.id
32+
if "output_text" in event.type:
33+
rich.print(event)
34+
if event.sequence_number == 10:
35+
break
36+
37+
print("Interrupted. Continuing...")
38+
39+
assert id is not None
40+
async with client.responses.stream(
41+
response_id=id,
42+
starting_after=10,
43+
text_format=MathResponse,
44+
) as stream:
45+
async for event in stream:
46+
if "output_text" in event.type:
47+
rich.print(event)
48+
49+
rich.print(stream.get_final_response())
50+
51+
52+
if __name__ == "__main__":
53+
asyncio.run(main())

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "openai"
3-
version = "1.81.0"
3+
version = "1.82.0"
44
description = "The official Python library for the openai API"
55
dynamic = ["readme"]
66
license = "Apache-2.0"

src/openai/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

33
__title__ = "openai"
4-
__version__ = "1.81.0" # x-release-please-version
4+
__version__ = "1.82.0" # x-release-please-version

src/openai/lib/azure.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
"/audio/translations",
2626
"/audio/speech",
2727
"/images/generations",
28+
"/images/edits",
2829
]
2930
)
3031

src/openai/lib/streaming/responses/_events.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,6 @@
3131
ResponseMcpCallInProgressEvent,
3232
ResponseMcpListToolsFailedEvent,
3333
ResponseAudioTranscriptDoneEvent,
34-
ResponseTextAnnotationDeltaEvent,
3534
ResponseAudioTranscriptDeltaEvent,
3635
ResponseMcpCallArgumentsDoneEvent,
3736
ResponseReasoningSummaryDoneEvent,
@@ -118,7 +117,6 @@ class ResponseCompletedEvent(RawResponseCompletedEvent, GenericModel, Generic[Te
118117
ResponseOutputItemDoneEvent,
119118
ResponseRefusalDeltaEvent,
120119
ResponseRefusalDoneEvent,
121-
ResponseTextAnnotationDeltaEvent,
122120
ResponseTextDoneEvent,
123121
ResponseWebSearchCallCompletedEvent,
124122
ResponseWebSearchCallInProgressEvent,

src/openai/lib/streaming/responses/_responses.py

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,11 +34,13 @@ def __init__(
3434
raw_stream: Stream[RawResponseStreamEvent],
3535
text_format: type[TextFormatT] | NotGiven,
3636
input_tools: Iterable[ToolParam] | NotGiven,
37+
starting_after: int | None,
3738
) -> None:
3839
self._raw_stream = raw_stream
3940
self._response = raw_stream.response
4041
self._iterator = self.__stream__()
4142
self._state = ResponseStreamState(text_format=text_format, input_tools=input_tools)
43+
self._starting_after = starting_after
4244

4345
def __next__(self) -> ResponseStreamEvent[TextFormatT]:
4446
return self._iterator.__next__()
@@ -54,7 +56,8 @@ def __stream__(self) -> Iterator[ResponseStreamEvent[TextFormatT]]:
5456
for sse_event in self._raw_stream:
5557
events_to_fire = self._state.handle_event(sse_event)
5658
for event in events_to_fire:
57-
yield event
59+
if self._starting_after is None or event.sequence_number > self._starting_after:
60+
yield event
5861

5962
def __exit__(
6063
self,
@@ -96,11 +99,13 @@ def __init__(
9699
*,
97100
text_format: type[TextFormatT] | NotGiven,
98101
input_tools: Iterable[ToolParam] | NotGiven,
102+
starting_after: int | None,
99103
) -> None:
100104
self.__stream: ResponseStream[TextFormatT] | None = None
101105
self.__api_request = api_request
102106
self.__text_format = text_format
103107
self.__input_tools = input_tools
108+
self.__starting_after = starting_after
104109

105110
def __enter__(self) -> ResponseStream[TextFormatT]:
106111
raw_stream = self.__api_request()
@@ -109,6 +114,7 @@ def __enter__(self) -> ResponseStream[TextFormatT]:
109114
raw_stream=raw_stream,
110115
text_format=self.__text_format,
111116
input_tools=self.__input_tools,
117+
starting_after=self.__starting_after,
112118
)
113119

114120
return self.__stream
@@ -130,11 +136,13 @@ def __init__(
130136
raw_stream: AsyncStream[RawResponseStreamEvent],
131137
text_format: type[TextFormatT] | NotGiven,
132138
input_tools: Iterable[ToolParam] | NotGiven,
139+
starting_after: int | None,
133140
) -> None:
134141
self._raw_stream = raw_stream
135142
self._response = raw_stream.response
136143
self._iterator = self.__stream__()
137144
self._state = ResponseStreamState(text_format=text_format, input_tools=input_tools)
145+
self._starting_after = starting_after
138146

139147
async def __anext__(self) -> ResponseStreamEvent[TextFormatT]:
140148
return await self._iterator.__anext__()
@@ -147,7 +155,8 @@ async def __stream__(self) -> AsyncIterator[ResponseStreamEvent[TextFormatT]]:
147155
async for sse_event in self._raw_stream:
148156
events_to_fire = self._state.handle_event(sse_event)
149157
for event in events_to_fire:
150-
yield event
158+
if self._starting_after is None or event.sequence_number > self._starting_after:
159+
yield event
151160

152161
async def __aenter__(self) -> Self:
153162
return self
@@ -192,11 +201,13 @@ def __init__(
192201
*,
193202
text_format: type[TextFormatT] | NotGiven,
194203
input_tools: Iterable[ToolParam] | NotGiven,
204+
starting_after: int | None,
195205
) -> None:
196206
self.__stream: AsyncResponseStream[TextFormatT] | None = None
197207
self.__api_request = api_request
198208
self.__text_format = text_format
199209
self.__input_tools = input_tools
210+
self.__starting_after = starting_after
200211

201212
async def __aenter__(self) -> AsyncResponseStream[TextFormatT]:
202213
raw_stream = await self.__api_request
@@ -205,6 +216,7 @@ async def __aenter__(self) -> AsyncResponseStream[TextFormatT]:
205216
raw_stream=raw_stream,
206217
text_format=self.__text_format,
207218
input_tools=self.__input_tools,
219+
starting_after=self.__starting_after,
208220
)
209221

210222
return self.__stream

0 commit comments

Comments (0)