Skip to content

Commit 1f9ff1d

Browse files
Merge branch 'main' into fix/openai#2092
2 parents 81a459b + 3f8d820 commit 1f9ff1d

25 files changed

+387
-72
lines changed

Diff for: .release-please-manifest.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
".": "1.61.1"
2+
".": "1.62.0"
33
}

Diff for: .stats.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
configured_endpoints: 69
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fc5dbc19505b0035f9e7f88868619f4fb519b048bde011f6154f3132d4be71fb.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-dfb00c627f58e5180af7a9b29ed2f2aa0764a3b9daa6a32a1cc45bc8e48dfe15.yml

Diff for: CHANGELOG.md

+22
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,27 @@
11
# Changelog
22

3+
## 1.62.0 (2025-02-12)
4+
5+
Full Changelog: [v1.61.1...v1.62.0](https://github.com/openai/openai-python/compare/v1.61.1...v1.62.0)
6+
7+
### Features
8+
9+
* **client:** send `X-Stainless-Read-Timeout` header ([#2094](https://github.com/openai/openai-python/issues/2094)) ([0288213](https://github.com/openai/openai-python/commit/0288213fbfa935c9bf9d56416619ea929ae1cf63))
10+
* **embeddings:** use stdlib array type for improved performance ([#2060](https://github.com/openai/openai-python/issues/2060)) ([9a95db9](https://github.com/openai/openai-python/commit/9a95db9154ac98678970e7f1652a7cacfd2f7fdb))
11+
* **pagination:** avoid fetching when has_more: false ([#2098](https://github.com/openai/openai-python/issues/2098)) ([1882483](https://github.com/openai/openai-python/commit/18824832d3a676ae49206cd2b5e09d4796fdf033))
12+
13+
14+
### Bug Fixes
15+
16+
* **api:** add missing reasoning effort + model enums ([#2096](https://github.com/openai/openai-python/issues/2096)) ([e0ca9f0](https://github.com/openai/openai-python/commit/e0ca9f0f6fae40230f8cab97573914ed632920b6))
17+
* **parsing:** don't default to an empty array ([#2106](https://github.com/openai/openai-python/issues/2106)) ([8e748bb](https://github.com/openai/openai-python/commit/8e748bb08d9c0d1f7e8a1af31452e25eb7154f55))
18+
19+
20+
### Chores
21+
22+
* **internal:** fix type traversing dictionary params ([#2097](https://github.com/openai/openai-python/issues/2097)) ([4e5b368](https://github.com/openai/openai-python/commit/4e5b368bf576f38d0f125778edde74ed6d101d7d))
23+
* **internal:** minor type handling changes ([#2099](https://github.com/openai/openai-python/issues/2099)) ([a2c6da0](https://github.com/openai/openai-python/commit/a2c6da0fbc610ee80a2e044a0b20fc1cc2376962))
24+
325
## 1.61.1 (2025-02-05)
426

527
Full Changelog: [v1.61.0...v1.61.1](https://github.com/openai/openai-python/compare/v1.61.0...v1.61.1)

Diff for: pyproject.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "openai"
3-
version = "1.61.1"
3+
version = "1.62.0"
44
description = "The official Python library for the openai API"
55
dynamic = ["readme"]
66
license = "Apache-2.0"

Diff for: src/openai/_base_client.py

+9-2
Original file line numberDiff line numberDiff line change
@@ -420,10 +420,17 @@ def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0
420420
if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers:
421421
headers[idempotency_header] = options.idempotency_key or self._idempotency_key()
422422

423-
# Don't set the retry count header if it was already set or removed by the caller. We check
423+
# Don't set these headers if they were already set or removed by the caller. We check
424424
# `custom_headers`, which can contain `Omit()`, instead of `headers` to account for the removal case.
425-
if "x-stainless-retry-count" not in (header.lower() for header in custom_headers):
425+
lower_custom_headers = [header.lower() for header in custom_headers]
426+
if "x-stainless-retry-count" not in lower_custom_headers:
426427
headers["x-stainless-retry-count"] = str(retries_taken)
428+
if "x-stainless-read-timeout" not in lower_custom_headers:
429+
timeout = self.timeout if isinstance(options.timeout, NotGiven) else options.timeout
430+
if isinstance(timeout, Timeout):
431+
timeout = timeout.read
432+
if timeout is not None:
433+
headers["x-stainless-read-timeout"] = str(timeout)
427434

428435
return headers
429436

Diff for: src/openai/_models.py

+7-1
Original file line numberDiff line numberDiff line change
@@ -451,10 +451,16 @@ def construct_type(*, value: object, type_: object) -> object:
451451
452452
If the given value does not match the expected type then it is returned as-is.
453453
"""
454+
455+
# store a reference to the original type we were given before we extract any inner
456+
# types so that we can properly resolve forward references in `TypeAliasType` annotations
457+
original_type = None
458+
454459
# we allow `object` as the input type because otherwise, passing things like
455460
# `Literal['value']` will be reported as a type error by type checkers
456461
type_ = cast("type[object]", type_)
457462
if is_type_alias_type(type_):
463+
original_type = type_ # type: ignore[unreachable]
458464
type_ = type_.__value__ # type: ignore[unreachable]
459465

460466
# unwrap `Annotated[T, ...]` -> `T`
@@ -471,7 +477,7 @@ def construct_type(*, value: object, type_: object) -> object:
471477

472478
if is_union(origin):
473479
try:
474-
return validate_type(type_=cast("type[object]", type_), value=value)
480+
return validate_type(type_=cast("type[object]", original_type or type_), value=value)
475481
except Exception:
476482
pass
477483

Diff for: src/openai/_utils/_transform.py

+11-1
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
is_annotated_type,
2626
strip_annotated_type,
2727
)
28-
from .._compat import model_dump, is_typeddict
28+
from .._compat import get_origin, model_dump, is_typeddict
2929

3030
_T = TypeVar("_T")
3131

@@ -164,9 +164,14 @@ def _transform_recursive(
164164
inner_type = annotation
165165

166166
stripped_type = strip_annotated_type(inner_type)
167+
origin = get_origin(stripped_type) or stripped_type
167168
if is_typeddict(stripped_type) and is_mapping(data):
168169
return _transform_typeddict(data, stripped_type)
169170

171+
if origin == dict and is_mapping(data):
172+
items_type = get_args(stripped_type)[1]
173+
return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()}
174+
170175
if (
171176
# List[T]
172177
(is_list_type(stripped_type) and is_list(data))
@@ -307,9 +312,14 @@ async def _async_transform_recursive(
307312
inner_type = annotation
308313

309314
stripped_type = strip_annotated_type(inner_type)
315+
origin = get_origin(stripped_type) or stripped_type
310316
if is_typeddict(stripped_type) and is_mapping(data):
311317
return await _async_transform_typeddict(data, stripped_type)
312318

319+
if origin == dict and is_mapping(data):
320+
items_type = get_args(stripped_type)[1]
321+
return {key: _transform_recursive(value, annotation=items_type) for key, value in data.items()}
322+
313323
if (
314324
# List[T]
315325
(is_list_type(stripped_type) and is_list(data))

Diff for: src/openai/_version.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

33
__title__ = "openai"
4-
__version__ = "1.61.1" # x-release-please-version
4+
__version__ = "1.62.0" # x-release-please-version

Diff for: src/openai/lib/_parsing/_completions.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,7 @@ def parse_chat_completion(
111111
response_format=response_format,
112112
message=message,
113113
),
114-
"tool_calls": tool_calls,
114+
"tool_calls": tool_calls if tool_calls else None,
115115
},
116116
},
117117
)

Diff for: src/openai/pagination.py

+18
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@ def next_page_info(self) -> None:
6161

6262
class SyncCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
6363
data: List[_T]
64+
has_more: Optional[bool] = None
6465

6566
@override
6667
def _get_page_items(self) -> List[_T]:
@@ -69,6 +70,14 @@ def _get_page_items(self) -> List[_T]:
6970
return []
7071
return data
7172

73+
@override
74+
def has_next_page(self) -> bool:
75+
has_more = self.has_more
76+
if has_more is not None and has_more is False:
77+
return False
78+
79+
return super().has_next_page()
80+
7281
@override
7382
def next_page_info(self) -> Optional[PageInfo]:
7483
data = self.data
@@ -85,6 +94,7 @@ def next_page_info(self) -> Optional[PageInfo]:
8594

8695
class AsyncCursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
8796
data: List[_T]
97+
has_more: Optional[bool] = None
8898

8999
@override
90100
def _get_page_items(self) -> List[_T]:
@@ -93,6 +103,14 @@ def _get_page_items(self) -> List[_T]:
93103
return []
94104
return data
95105

106+
@override
107+
def has_next_page(self) -> bool:
108+
has_more = self.has_more
109+
if has_more is not None and has_more is False:
110+
return False
111+
112+
return super().has_next_page()
113+
96114
@override
97115
def next_page_info(self) -> Optional[PageInfo]:
98116
data = self.data

Diff for: src/openai/resources/beta/assistants.py

+104-2
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@ def create(
6161
instructions: Optional[str] | NotGiven = NOT_GIVEN,
6262
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
6363
name: Optional[str] | NotGiven = NOT_GIVEN,
64+
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
6465
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
6566
temperature: Optional[float] | NotGiven = NOT_GIVEN,
6667
tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -97,6 +98,13 @@ def create(
9798
9899
name: The name of the assistant. The maximum length is 256 characters.
99100
101+
reasoning_effort: **o1 and o3-mini models only**
102+
103+
Constrains effort on reasoning for
104+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
105+
supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
106+
result in faster responses and fewer tokens used on reasoning in a response.
107+
100108
response_format: Specifies the format that the model must output. Compatible with
101109
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
102110
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -155,6 +163,7 @@ def create(
155163
"instructions": instructions,
156164
"metadata": metadata,
157165
"name": name,
166+
"reasoning_effort": reasoning_effort,
158167
"response_format": response_format,
159168
"temperature": temperature,
160169
"tool_resources": tool_resources,
@@ -210,8 +219,42 @@ def update(
210219
description: Optional[str] | NotGiven = NOT_GIVEN,
211220
instructions: Optional[str] | NotGiven = NOT_GIVEN,
212221
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
213-
model: str | NotGiven = NOT_GIVEN,
222+
model: Union[
223+
str,
224+
Literal[
225+
"o3-mini",
226+
"o3-mini-2025-01-31",
227+
"o1",
228+
"o1-2024-12-17",
229+
"gpt-4o",
230+
"gpt-4o-2024-11-20",
231+
"gpt-4o-2024-08-06",
232+
"gpt-4o-2024-05-13",
233+
"gpt-4o-mini",
234+
"gpt-4o-mini-2024-07-18",
235+
"gpt-4-turbo",
236+
"gpt-4-turbo-2024-04-09",
237+
"gpt-4-0125-preview",
238+
"gpt-4-turbo-preview",
239+
"gpt-4-1106-preview",
240+
"gpt-4-vision-preview",
241+
"gpt-4",
242+
"gpt-4-0314",
243+
"gpt-4-0613",
244+
"gpt-4-32k",
245+
"gpt-4-32k-0314",
246+
"gpt-4-32k-0613",
247+
"gpt-3.5-turbo",
248+
"gpt-3.5-turbo-16k",
249+
"gpt-3.5-turbo-0613",
250+
"gpt-3.5-turbo-1106",
251+
"gpt-3.5-turbo-0125",
252+
"gpt-3.5-turbo-16k-0613",
253+
],
254+
]
255+
| NotGiven = NOT_GIVEN,
214256
name: Optional[str] | NotGiven = NOT_GIVEN,
257+
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
215258
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
216259
temperature: Optional[float] | NotGiven = NOT_GIVEN,
217260
tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -249,6 +292,13 @@ def update(
249292
250293
name: The name of the assistant. The maximum length is 256 characters.
251294
295+
reasoning_effort: **o1 and o3-mini models only**
296+
297+
Constrains effort on reasoning for
298+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
299+
supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
300+
result in faster responses and fewer tokens used on reasoning in a response.
301+
252302
response_format: Specifies the format that the model must output. Compatible with
253303
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
254304
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -309,6 +359,7 @@ def update(
309359
"metadata": metadata,
310360
"model": model,
311361
"name": name,
362+
"reasoning_effort": reasoning_effort,
312363
"response_format": response_format,
313364
"temperature": temperature,
314365
"tool_resources": tool_resources,
@@ -451,6 +502,7 @@ async def create(
451502
instructions: Optional[str] | NotGiven = NOT_GIVEN,
452503
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
453504
name: Optional[str] | NotGiven = NOT_GIVEN,
505+
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
454506
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
455507
temperature: Optional[float] | NotGiven = NOT_GIVEN,
456508
tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -487,6 +539,13 @@ async def create(
487539
488540
name: The name of the assistant. The maximum length is 256 characters.
489541
542+
reasoning_effort: **o1 and o3-mini models only**
543+
544+
Constrains effort on reasoning for
545+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
546+
supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
547+
result in faster responses and fewer tokens used on reasoning in a response.
548+
490549
response_format: Specifies the format that the model must output. Compatible with
491550
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
492551
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -545,6 +604,7 @@ async def create(
545604
"instructions": instructions,
546605
"metadata": metadata,
547606
"name": name,
607+
"reasoning_effort": reasoning_effort,
548608
"response_format": response_format,
549609
"temperature": temperature,
550610
"tool_resources": tool_resources,
@@ -600,8 +660,42 @@ async def update(
600660
description: Optional[str] | NotGiven = NOT_GIVEN,
601661
instructions: Optional[str] | NotGiven = NOT_GIVEN,
602662
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
603-
model: str | NotGiven = NOT_GIVEN,
663+
model: Union[
664+
str,
665+
Literal[
666+
"o3-mini",
667+
"o3-mini-2025-01-31",
668+
"o1",
669+
"o1-2024-12-17",
670+
"gpt-4o",
671+
"gpt-4o-2024-11-20",
672+
"gpt-4o-2024-08-06",
673+
"gpt-4o-2024-05-13",
674+
"gpt-4o-mini",
675+
"gpt-4o-mini-2024-07-18",
676+
"gpt-4-turbo",
677+
"gpt-4-turbo-2024-04-09",
678+
"gpt-4-0125-preview",
679+
"gpt-4-turbo-preview",
680+
"gpt-4-1106-preview",
681+
"gpt-4-vision-preview",
682+
"gpt-4",
683+
"gpt-4-0314",
684+
"gpt-4-0613",
685+
"gpt-4-32k",
686+
"gpt-4-32k-0314",
687+
"gpt-4-32k-0613",
688+
"gpt-3.5-turbo",
689+
"gpt-3.5-turbo-16k",
690+
"gpt-3.5-turbo-0613",
691+
"gpt-3.5-turbo-1106",
692+
"gpt-3.5-turbo-0125",
693+
"gpt-3.5-turbo-16k-0613",
694+
],
695+
]
696+
| NotGiven = NOT_GIVEN,
604697
name: Optional[str] | NotGiven = NOT_GIVEN,
698+
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
605699
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
606700
temperature: Optional[float] | NotGiven = NOT_GIVEN,
607701
tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -639,6 +733,13 @@ async def update(
639733
640734
name: The name of the assistant. The maximum length is 256 characters.
641735
736+
reasoning_effort: **o1 and o3-mini models only**
737+
738+
Constrains effort on reasoning for
739+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
740+
supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
741+
result in faster responses and fewer tokens used on reasoning in a response.
742+
642743
response_format: Specifies the format that the model must output. Compatible with
643744
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
644745
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -699,6 +800,7 @@ async def update(
699800
"metadata": metadata,
700801
"model": model,
701802
"name": name,
803+
"reasoning_effort": reasoning_effort,
702804
"response_format": response_format,
703805
"temperature": temperature,
704806
"tool_resources": tool_resources,

0 commit comments

Comments
 (0)