release: 1.39.0 #1598

Merged 6 commits on Aug 5, 2024

2 changes: 1 addition & 1 deletion .release-please-manifest.json
@@ -1,3 +1,3 @@
{
".": "1.38.0"
".": "1.39.0"
}
20 changes: 20 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,25 @@
# Changelog

## 1.39.0 (2024-08-05)

Full Changelog: [v1.38.0...v1.39.0](https://github.com/openai/openai-python/compare/v1.38.0...v1.39.0)

### Features

* **client:** add `retries_taken` to raw response class ([#1601](https://github.com/openai/openai-python/issues/1601)) ([777822b](https://github.com/openai/openai-python/commit/777822b39b7f9ebd6272d0af8fc04f9d657bd886))


### Bug Fixes

* **assistants:** add parallel_tool_calls param to runs.stream ([113e82a](https://github.com/openai/openai-python/commit/113e82a82c7390660ad3324fa8f9842f83b27571))


### Chores

* **internal:** bump pyright ([#1599](https://github.com/openai/openai-python/issues/1599)) ([27f0f10](https://github.com/openai/openai-python/commit/27f0f107e39d16adc0d5a50ffe4c687e0e3c42e5))
* **internal:** test updates ([#1602](https://github.com/openai/openai-python/issues/1602)) ([af22d80](https://github.com/openai/openai-python/commit/af22d8079cf44cde5f03a206e78b900f8413dc43))
* **internal:** use `TypeAlias` marker for type assignments ([#1597](https://github.com/openai/openai-python/issues/1597)) ([5907ea0](https://github.com/openai/openai-python/commit/5907ea04d6f5e0ffd17c38ad6a644a720ece8abe))

## 1.38.0 (2024-08-02)

Full Changelog: [v1.37.2...v1.38.0](https://github.com/openai/openai-python/compare/v1.37.2...v1.38.0)
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
version = "1.38.0"
version = "1.39.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
2 changes: 1 addition & 1 deletion requirements-dev.lock
@@ -123,7 +123,7 @@ pygments==2.18.0
# via rich
pyjwt==2.8.0
# via msal
-pyright==1.1.364
+pyright==1.1.374
pytest==7.1.1
# via pytest-asyncio
pytest-asyncio==0.21.1
10 changes: 10 additions & 0 deletions src/openai/_base_client.py
@@ -1051,6 +1051,7 @@ def _request(
response=response,
stream=stream,
stream_cls=stream_cls,
retries_taken=options.get_max_retries(self.max_retries) - retries,
)

def _retry_request(
@@ -1092,6 +1093,7 @@ def _process_response(
response: httpx.Response,
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
retries_taken: int = 0,
) -> ResponseT:
if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
return cast(
@@ -1103,6 +1105,7 @@ def _process_response(
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)

@@ -1122,6 +1125,7 @@ def _process_response(
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)

@@ -1135,6 +1139,7 @@ def _process_response(
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
)
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
return cast(ResponseT, api_response)
@@ -1625,6 +1630,7 @@ async def _request(
response=response,
stream=stream,
stream_cls=stream_cls,
retries_taken=options.get_max_retries(self.max_retries) - retries,
)

async def _retry_request(
@@ -1664,6 +1670,7 @@ async def _process_response(
response: httpx.Response,
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
retries_taken: int = 0,
) -> ResponseT:
if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
return cast(
@@ -1675,6 +1682,7 @@ async def _process_response(
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)

@@ -1694,6 +1702,7 @@ async def _process_response(
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)

@@ -1707,6 +1716,7 @@ async def _process_response(
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
)
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
return cast(ResponseT, api_response)
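
The value threaded through both the sync and async paths is computed by subtraction: `retries` counts down from the configured maximum inside the request loop, so `options.get_max_retries(self.max_retries) - retries` is the number of retries actually consumed. A minimal sketch of that arithmetic, with placeholder values rather than the client's internals:

```python
# Hypothetical values for illustration; in the SDK, `retries` is the
# remaining-retry counter passed down through _request/_retry_request.
max_retries = 3  # options.get_max_retries(self.max_retries)
remaining = 1    # value of the countdown when the request finally succeeds

retries_taken = max_retries - remaining
assert retries_taken == 2  # two retries were consumed before success
```
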
18 changes: 17 additions & 1 deletion src/openai/_legacy_response.py
@@ -5,7 +5,18 @@
import logging
import datetime
import functools
-from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, Iterator, AsyncIterator, cast, overload
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Union,
+    Generic,
+    TypeVar,
+    Callable,
+    Iterator,
+    AsyncIterator,
+    cast,
+    overload,
+)
from typing_extensions import Awaitable, ParamSpec, override, deprecated, get_origin

import anyio
@@ -53,6 +64,9 @@ class LegacyAPIResponse(Generic[R]):

http_response: httpx.Response

retries_taken: int
"""The number of retries made. If no retries happened this will be `0`"""

def __init__(
self,
*,
@@ -62,6 +76,7 @@ def __init__(
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
options: FinalRequestOptions,
retries_taken: int = 0,
) -> None:
self._cast_to = cast_to
self._client = client
@@ -70,6 +85,7 @@ def __init__(
self._stream_cls = stream_cls
self._options = options
self.http_response = raw
self.retries_taken = retries_taken

@property
def request_id(self) -> str | None:
5 changes: 5 additions & 0 deletions src/openai/_response.py
@@ -55,6 +55,9 @@ class BaseAPIResponse(Generic[R]):

http_response: httpx.Response

retries_taken: int
"""The number of retries made. If no retries happened this will be `0`"""

def __init__(
self,
*,
@@ -64,6 +67,7 @@ def __init__(
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
options: FinalRequestOptions,
retries_taken: int = 0,
) -> None:
self._cast_to = cast_to
self._client = client
@@ -72,6 +76,7 @@ def __init__(
self._stream_cls = stream_cls
self._options = options
self.http_response = raw
self.retries_taken = retries_taken

@property
def headers(self) -> httpx.Headers:
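
With the attribute plumbed into both response classes, it can be read off any raw response. A usage sketch assuming `openai>=1.39.0`; the model name and prompt are illustrative:

```python
from openai import OpenAI

client = OpenAI(max_retries=3)

# .with_raw_response returns the response wrapper instead of the parsed
# model, so response metadata is available alongside the body.
raw = client.chat.completions.with_raw_response.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "ping"}],
)
print(raw.retries_taken)  # 0 if the first attempt succeeded
completion = raw.parse()  # the usual ChatCompletion object
```
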
2 changes: 1 addition & 1 deletion src/openai/_utils/_reflection.py
@@ -34,7 +34,7 @@ def assert_signatures_in_sync(

if custom_param.annotation != source_param.annotation:
errors.append(
f"types for the `{name}` param are do not match; source={repr(source_param.annotation)} checking={repr(source_param.annotation)}"
f"types for the `{name}` param are do not match; source={repr(source_param.annotation)} checking={repr(custom_param.annotation)}"
)
continue

2 changes: 1 addition & 1 deletion src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "openai"
__version__ = "1.38.0" # x-release-please-version
__version__ = "1.39.0" # x-release-please-version
8 changes: 8 additions & 0 deletions src/openai/resources/beta/threads/runs/runs.py
@@ -950,6 +950,7 @@ def stream(
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -979,6 +980,7 @@ def stream(
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1008,6 +1010,7 @@ def stream(
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1051,6 +1054,7 @@ def stream(
"tool_choice": tool_choice,
"stream": True,
"tools": tools,
"parallel_tool_calls": parallel_tool_calls,
"truncation_strategy": truncation_strategy,
"top_p": top_p,
},
@@ -2246,6 +2250,7 @@ def stream(
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -2275,6 +2280,7 @@ def stream(
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -2304,6 +2310,7 @@ def stream(
max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -2349,6 +2356,7 @@ def stream(
"tool_choice": tool_choice,
"stream": True,
"tools": tools,
"parallel_tool_calls": parallel_tool_calls,
"truncation_strategy": truncation_strategy,
"top_p": top_p,
},
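
With the overloads and the request body updated, the flag passes straight through the streaming helper. A sketch assuming `openai>=1.39.0`; the IDs are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# parallel_tool_calls=False asks the model to call at most one tool at a
# time during the run; omit the flag to keep the server-side default.
with client.beta.threads.runs.stream(
    thread_id="thread_abc123",
    assistant_id="asst_abc123",
    parallel_tool_calls=False,
) as stream:
    for event in stream:
        print(event.event)
```
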
4 changes: 2 additions & 2 deletions src/openai/types/audio/speech_model.py
@@ -1,7 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

-from typing_extensions import Literal
+from typing_extensions import Literal, TypeAlias

__all__ = ["SpeechModel"]

-SpeechModel = Literal["tts-1", "tts-1-hd"]
+SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd"]
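
The same mechanical change repeats across the type modules below. The `TypeAlias` marker (PEP 613) tells type checkers that the assignment defines an alias rather than an ordinary module-level variable; a minimal sketch of the distinction, with a hypothetical helper for illustration:

```python
from typing_extensions import Literal, TypeAlias

# Implicit form: a checker may treat this as a plain variable assignment.
SpeechModelImplicit = Literal["tts-1", "tts-1-hd"]

# Explicit form: unambiguously a type alias, safe to use in annotations.
SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd"]

def synthesize(model: SpeechModel) -> str:  # hypothetical helper
    return f"synthesizing with {model}"

print(synthesize("tts-1"))
```
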
4 changes: 2 additions & 2 deletions src/openai/types/audio_model.py
@@ -1,7 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

-from typing_extensions import Literal
+from typing_extensions import Literal, TypeAlias

__all__ = ["AudioModel"]

-AudioModel = Literal["whisper-1"]
+AudioModel: TypeAlias = Literal["whisper-1"]
4 changes: 2 additions & 2 deletions src/openai/types/beta/assistant_create_params.py
@@ -3,7 +3,7 @@
from __future__ import annotations

from typing import List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
+from typing_extensions import Literal, Required, TypeAlias, TypedDict

from ..chat_model import ChatModel
from .assistant_tool_param import AssistantToolParam
@@ -140,7 +140,7 @@ class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=
"""Always `static`."""


-ToolResourcesFileSearchVectorStoreChunkingStrategy = Union[
+ToolResourcesFileSearchVectorStoreChunkingStrategy: TypeAlias = Union[
ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic
]

4 changes: 2 additions & 2 deletions src/openai/types/beta/assistant_response_format_option.py
@@ -1,10 +1,10 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Union
-from typing_extensions import Literal
+from typing_extensions import Literal, TypeAlias

from .assistant_response_format import AssistantResponseFormat

__all__ = ["AssistantResponseFormatOption"]

-AssistantResponseFormatOption = Union[Literal["none", "auto"], AssistantResponseFormat]
+AssistantResponseFormatOption: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormat]
4 changes: 2 additions & 2 deletions src/openai/types/beta/assistant_response_format_option_param.py
@@ -3,10 +3,10 @@
from __future__ import annotations

from typing import Union
-from typing_extensions import Literal
+from typing_extensions import Literal, TypeAlias

from .assistant_response_format_param import AssistantResponseFormatParam

__all__ = ["AssistantResponseFormatOptionParam"]

-AssistantResponseFormatOptionParam = Union[Literal["none", "auto"], AssistantResponseFormatParam]
+AssistantResponseFormatOptionParam: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormatParam]
4 changes: 2 additions & 2 deletions src/openai/types/beta/assistant_stream_event.py
@@ -1,7 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Union
-from typing_extensions import Literal, Annotated
+from typing_extensions import Literal, Annotated, TypeAlias

from .thread import Thread
from ..._utils import PropertyInfo
@@ -260,7 +260,7 @@ class ErrorEvent(BaseModel):
event: Literal["error"]


-AssistantStreamEvent = Annotated[
+AssistantStreamEvent: TypeAlias = Annotated[
Union[
ThreadCreated,
ThreadRunCreated,
6 changes: 4 additions & 2 deletions src/openai/types/beta/assistant_tool.py
@@ -1,7 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Union
-from typing_extensions import Annotated
+from typing_extensions import Annotated, TypeAlias

from ..._utils import PropertyInfo
from .function_tool import FunctionTool
@@ -10,4 +10,6 @@

__all__ = ["AssistantTool"]

-AssistantTool = Annotated[Union[CodeInterpreterTool, FileSearchTool, FunctionTool], PropertyInfo(discriminator="type")]
+AssistantTool: TypeAlias = Annotated[
+    Union[CodeInterpreterTool, FileSearchTool, FunctionTool], PropertyInfo(discriminator="type")
+]
4 changes: 2 additions & 2 deletions src/openai/types/beta/assistant_tool_choice_option.py
@@ -1,10 +1,10 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Union
-from typing_extensions import Literal
+from typing_extensions import Literal, TypeAlias

from .assistant_tool_choice import AssistantToolChoice

__all__ = ["AssistantToolChoiceOption"]

-AssistantToolChoiceOption = Union[Literal["none", "auto", "required"], AssistantToolChoice]
+AssistantToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoice]