Skip to content

release: 1.82.0 #2372

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 8 commits into from
May 22, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "1.81.0"
".": "1.82.0"
}
6 changes: 3 additions & 3 deletions .stats.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
configured_endpoints: 111
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6af14840a810139bf407013167ce1c8fb21b6ef8eb0cc3db58b51af7d52c4b5a.yml
openapi_spec_hash: 3241bde6b273cfec0035e522bd07985d
config_hash: 7367b68a4e7db36885c1a886f57b17f6
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-fc64d7c2c8f51f750813375356c3f3fdfc7fc1b1b34f19c20a5410279d445d37.yml
openapi_spec_hash: 618285fc70199ee32b9ebe4bf72f7e4c
config_hash: c497f6b750cc89c0bf2eefc0bc839c70
19 changes: 19 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,24 @@
# Changelog

## 1.82.0 (2025-05-22)

Full Changelog: [v1.81.0...v1.82.0](https://github.com/openai/openai-python/compare/v1.81.0...v1.82.0)

### Features

* **api:** new streaming helpers for background responses ([2a65d4d](https://github.com/openai/openai-python/commit/2a65d4de0aaba7801edd0df10f225530fd4969bd))


### Bug Fixes

* **azure:** mark images/edits as a deployment endpoint [#2371](https://github.com/openai/openai-python/issues/2371) ([5d1d5b4](https://github.com/openai/openai-python/commit/5d1d5b4b6072afe9fd7909b1a36014c8c11c1ad6))


### Documentation

* **readme:** another async example fix ([9ec8289](https://github.com/openai/openai-python/commit/9ec8289041f395805c67efd97847480f84eb9dac))
* **readme:** fix async example ([37d0b25](https://github.com/openai/openai-python/commit/37d0b25b6e82cd381e5d1aa6e28f1a1311d02353))

## 1.81.0 (2025-05-21)

Full Changelog: [v1.80.0...v1.81.0](https://github.com/openai/openai-python/compare/v1.80.0...v1.81.0)
Expand Down
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -174,13 +174,13 @@ client = AsyncOpenAI()


async def main():
stream = client.responses.create(
stream = await client.responses.create(
model="gpt-4o",
input="Write a one-sentence bedtime story about a unicorn.",
stream=True,
)

for event in stream:
async for event in stream:
print(event)


Expand Down
1 change: 0 additions & 1 deletion api.md
Original file line number Diff line number Diff line change
Expand Up @@ -764,7 +764,6 @@ from openai.types.responses import (
ResponseRefusalDoneEvent,
ResponseStatus,
ResponseStreamEvent,
ResponseTextAnnotationDeltaEvent,
ResponseTextConfig,
ResponseTextDeltaEvent,
ResponseTextDoneEvent,
Expand Down
46 changes: 46 additions & 0 deletions examples/responses/background.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
from typing import List

import rich
from pydantic import BaseModel

from openai import OpenAI


class Step(BaseModel):
    """One intermediate step of the model's worked solution."""

    explanation: str
    output: str


class MathResponse(BaseModel):
    """Structured answer: the worked steps plus the final result."""

    steps: List[Step]
    final_answer: str


client = OpenAI()
# ID of the background response, captured from the "response.created" event so
# the stream can be resumed after the deliberate interruption below.
# (Renamed from `id` to avoid shadowing the `id()` builtin.)
response_id = None

# Start a background response and stream its events as they arrive.
with client.responses.create(
    input="solve 8x + 31 = 2",
    model="gpt-4o-2024-08-06",
    background=True,
    stream=True,
) as stream:
    for event in stream:
        if event.type == "response.created":
            response_id = event.response.id
        if "output_text" in event.type:
            rich.print(event)
        # Simulate an interruption part-way through the stream.
        if event.sequence_number == 10:
            break

print("Interrupted. Continuing...")

# Resume streaming the same background response, skipping the events we
# already received (everything up to and including sequence number 10).
assert response_id is not None
with client.responses.retrieve(
    response_id=response_id,
    stream=True,
    starting_after=10,
) as stream:
    for event in stream:
        if "output_text" in event.type:
            rich.print(event)
52 changes: 52 additions & 0 deletions examples/responses/background_async.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import asyncio
from typing import List

import rich
from pydantic import BaseModel

# Import from the public package, not the private `openai._client` module,
# matching the other async examples in this directory.
from openai import AsyncOpenAI


class Step(BaseModel):
    """One intermediate step of the model's worked solution."""

    explanation: str
    output: str


class MathResponse(BaseModel):
    """Structured answer: the worked steps plus the final result."""

    steps: List[Step]
    final_answer: str


async def main() -> None:
    """Start a background response stream, interrupt it, then resume it."""
    client = AsyncOpenAI()
    # ID of the background response, captured from the "response.created"
    # event so the stream can be resumed after the interruption below.
    # (Renamed from `id` to avoid shadowing the `id()` builtin.)
    response_id = None

    # `create(..., stream=True)` returns an awaitable that resolves to an
    # async stream, which is also an async context manager.
    async with await client.responses.create(
        input="solve 8x + 31 = 2",
        model="gpt-4o-2024-08-06",
        background=True,
        stream=True,
    ) as stream:
        async for event in stream:
            if event.type == "response.created":
                response_id = event.response.id
            if "output_text" in event.type:
                rich.print(event)
            # Simulate an interruption part-way through the stream.
            if event.sequence_number == 10:
                break

    print("Interrupted. Continuing...")

    # Resume streaming the same background response, skipping the events we
    # already received (everything up to and including sequence number 10).
    assert response_id is not None
    async with await client.responses.retrieve(
        response_id=response_id,
        stream=True,
        starting_after=10,
    ) as stream:
        async for event in stream:
            if "output_text" in event.type:
                rich.print(event)


if __name__ == "__main__":
    asyncio.run(main())
48 changes: 48 additions & 0 deletions examples/responses/background_streaming.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
#!/usr/bin/env -S rye run python
from typing import List

import rich
from pydantic import BaseModel

from openai import OpenAI


class Step(BaseModel):
    """One intermediate step of the model's worked solution."""

    explanation: str
    output: str


class MathResponse(BaseModel):
    """Structured answer: the worked steps plus the final result."""

    steps: List[Step]
    final_answer: str


client = OpenAI()
# ID of the background response, captured from the "response.created" event so
# the stream can be resumed after the deliberate interruption below.
# (Renamed from `id` to avoid shadowing the `id()` builtin.)
response_id = None

# Start a background response with structured output and stream its events.
with client.responses.stream(
    input="solve 8x + 31 = 2",
    model="gpt-4o-2024-08-06",
    text_format=MathResponse,
    background=True,
) as stream:
    for event in stream:
        if event.type == "response.created":
            response_id = event.response.id
        if "output_text" in event.type:
            rich.print(event)
        # Simulate an interruption part-way through the stream.
        if event.sequence_number == 10:
            break

print("Interrupted. Continuing...")

# Resume the same background response via its ID, skipping the events we
# already received (everything up to and including sequence number 10).
assert response_id is not None
with client.responses.stream(
    response_id=response_id,
    starting_after=10,
    text_format=MathResponse,
) as stream:
    for event in stream:
        if "output_text" in event.type:
            rich.print(event)

    # Once the stream is exhausted, the fully-parsed MathResponse is available.
    rich.print(stream.get_final_response())
53 changes: 53 additions & 0 deletions examples/responses/background_streaming_async.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
import asyncio
from typing import List

import rich
from pydantic import BaseModel

from openai import AsyncOpenAI


class Step(BaseModel):
    """One intermediate step of the model's worked solution."""

    explanation: str
    output: str


class MathResponse(BaseModel):
    """Structured answer: the worked steps plus the final result."""

    steps: List[Step]
    final_answer: str


async def main() -> None:
    """Start a background structured stream, interrupt it, then resume it."""
    client = AsyncOpenAI()
    # ID of the background response, captured from the "response.created"
    # event so the stream can be resumed after the interruption below.
    # (Renamed from `id` to avoid shadowing the `id()` builtin.)
    response_id = None

    # Start a background response with structured output and stream events.
    async with client.responses.stream(
        input="solve 8x + 31 = 2",
        model="gpt-4o-2024-08-06",
        text_format=MathResponse,
        background=True,
    ) as stream:
        async for event in stream:
            if event.type == "response.created":
                response_id = event.response.id
            if "output_text" in event.type:
                rich.print(event)
            # Simulate an interruption part-way through the stream.
            if event.sequence_number == 10:
                break

    print("Interrupted. Continuing...")

    # Resume the same background response via its ID, skipping the events we
    # already received (everything up to and including sequence number 10).
    assert response_id is not None
    async with client.responses.stream(
        response_id=response_id,
        starting_after=10,
        text_format=MathResponse,
    ) as stream:
        async for event in stream:
            if "output_text" in event.type:
                rich.print(event)

        # Once the stream is exhausted, the parsed MathResponse is available.
        rich.print(stream.get_final_response())


if __name__ == "__main__":
    asyncio.run(main())
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "openai"
version = "1.81.0"
version = "1.82.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
Expand Down
2 changes: 1 addition & 1 deletion src/openai/_version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "openai"
__version__ = "1.81.0" # x-release-please-version
__version__ = "1.82.0" # x-release-please-version
1 change: 1 addition & 0 deletions src/openai/lib/azure.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
"/audio/translations",
"/audio/speech",
"/images/generations",
"/images/edits",
]
)

Expand Down
2 changes: 0 additions & 2 deletions src/openai/lib/streaming/responses/_events.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@
ResponseMcpCallInProgressEvent,
ResponseMcpListToolsFailedEvent,
ResponseAudioTranscriptDoneEvent,
ResponseTextAnnotationDeltaEvent,
ResponseAudioTranscriptDeltaEvent,
ResponseMcpCallArgumentsDoneEvent,
ResponseReasoningSummaryDoneEvent,
Expand Down Expand Up @@ -118,7 +117,6 @@ class ResponseCompletedEvent(RawResponseCompletedEvent, GenericModel, Generic[Te
ResponseOutputItemDoneEvent,
ResponseRefusalDeltaEvent,
ResponseRefusalDoneEvent,
ResponseTextAnnotationDeltaEvent,
ResponseTextDoneEvent,
ResponseWebSearchCallCompletedEvent,
ResponseWebSearchCallInProgressEvent,
Expand Down
16 changes: 14 additions & 2 deletions src/openai/lib/streaming/responses/_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,11 +34,13 @@ def __init__(
raw_stream: Stream[RawResponseStreamEvent],
text_format: type[TextFormatT] | NotGiven,
input_tools: Iterable[ToolParam] | NotGiven,
starting_after: int | None,
) -> None:
self._raw_stream = raw_stream
self._response = raw_stream.response
self._iterator = self.__stream__()
self._state = ResponseStreamState(text_format=text_format, input_tools=input_tools)
self._starting_after = starting_after

def __next__(self) -> ResponseStreamEvent[TextFormatT]:
return self._iterator.__next__()
Expand All @@ -54,7 +56,8 @@ def __stream__(self) -> Iterator[ResponseStreamEvent[TextFormatT]]:
for sse_event in self._raw_stream:
events_to_fire = self._state.handle_event(sse_event)
for event in events_to_fire:
yield event
if self._starting_after is None or event.sequence_number > self._starting_after:
yield event

def __exit__(
self,
Expand Down Expand Up @@ -96,11 +99,13 @@ def __init__(
*,
text_format: type[TextFormatT] | NotGiven,
input_tools: Iterable[ToolParam] | NotGiven,
starting_after: int | None,
) -> None:
self.__stream: ResponseStream[TextFormatT] | None = None
self.__api_request = api_request
self.__text_format = text_format
self.__input_tools = input_tools
self.__starting_after = starting_after

def __enter__(self) -> ResponseStream[TextFormatT]:
raw_stream = self.__api_request()
Expand All @@ -109,6 +114,7 @@ def __enter__(self) -> ResponseStream[TextFormatT]:
raw_stream=raw_stream,
text_format=self.__text_format,
input_tools=self.__input_tools,
starting_after=self.__starting_after,
)

return self.__stream
Expand All @@ -130,11 +136,13 @@ def __init__(
raw_stream: AsyncStream[RawResponseStreamEvent],
text_format: type[TextFormatT] | NotGiven,
input_tools: Iterable[ToolParam] | NotGiven,
starting_after: int | None,
) -> None:
self._raw_stream = raw_stream
self._response = raw_stream.response
self._iterator = self.__stream__()
self._state = ResponseStreamState(text_format=text_format, input_tools=input_tools)
self._starting_after = starting_after

async def __anext__(self) -> ResponseStreamEvent[TextFormatT]:
return await self._iterator.__anext__()
Expand All @@ -147,7 +155,8 @@ async def __stream__(self) -> AsyncIterator[ResponseStreamEvent[TextFormatT]]:
async for sse_event in self._raw_stream:
events_to_fire = self._state.handle_event(sse_event)
for event in events_to_fire:
yield event
if self._starting_after is None or event.sequence_number > self._starting_after:
yield event

async def __aenter__(self) -> Self:
return self
Expand Down Expand Up @@ -192,11 +201,13 @@ def __init__(
*,
text_format: type[TextFormatT] | NotGiven,
input_tools: Iterable[ToolParam] | NotGiven,
starting_after: int | None,
) -> None:
self.__stream: AsyncResponseStream[TextFormatT] | None = None
self.__api_request = api_request
self.__text_format = text_format
self.__input_tools = input_tools
self.__starting_after = starting_after

async def __aenter__(self) -> AsyncResponseStream[TextFormatT]:
raw_stream = await self.__api_request
Expand All @@ -205,6 +216,7 @@ async def __aenter__(self) -> AsyncResponseStream[TextFormatT]:
raw_stream=raw_stream,
text_format=self.__text_format,
input_tools=self.__input_tools,
starting_after=self.__starting_after,
)

return self.__stream
Expand Down
Loading