
Commit 1ec5ed0

Merge branch 'main' into ignore-azure-async-filter-events
2 parents de2fd2c + 0c8343b commit 1ec5ed0

File tree

72 files changed: +6313 −29 lines


.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
 {
-  ".": "1.71.0"
+  ".": "1.72.0"
 }

.stats.yml

Lines changed: 4 additions & 4 deletions
@@ -1,4 +1,4 @@
-configured_endpoints: 82
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4bce8217a697c729ac98046d4caf2c9e826b54c427fb0ab4f98e549a2e0ce31c.yml
-openapi_spec_hash: 7996d2c34cc44fe2ce9ffe93c0ab774e
-config_hash: bcd2cacdcb9fae9938f273cd167f613c
+configured_endpoints: 97
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml
+openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6
+config_hash: 43dc8df20ffec9d1503f91866cb2b7d9

CHANGELOG.md

Lines changed: 16 additions & 0 deletions
@@ -1,5 +1,21 @@
 # Changelog
 
+## 1.72.0 (2025-04-08)
+
+Full Changelog: [v1.71.0...v1.72.0](https://github.com/openai/openai-python/compare/v1.71.0...v1.72.0)
+
+### Features
+
+* **api:** Add evalapi to sdk ([#2287](https://github.com/openai/openai-python/issues/2287)) ([35262fc](https://github.com/openai/openai-python/commit/35262fcef6ccb7d1f75c9abdfdc68c3dcf87ef53))
+
+
+### Chores
+
+* **internal:** fix examples ([#2288](https://github.com/openai/openai-python/issues/2288)) ([39defd6](https://github.com/openai/openai-python/commit/39defd61e81ea0ec6b898be12e9fb7e621c0e532))
+* **internal:** skip broken test ([#2289](https://github.com/openai/openai-python/issues/2289)) ([e2c9bce](https://github.com/openai/openai-python/commit/e2c9bce1f59686ee053b495d06ea118b4a89e09e))
+* **internal:** slight transform perf improvement ([#2284](https://github.com/openai/openai-python/issues/2284)) ([746174f](https://github.com/openai/openai-python/commit/746174fae7a018ece5dab54fb0b5a15fcdd18f2f))
+* **tests:** improve enum examples ([#2286](https://github.com/openai/openai-python/issues/2286)) ([c9dd81c](https://github.com/openai/openai-python/commit/c9dd81ce0277e8b1f5db5e0a39c4c2bcd9004bcc))
+
 ## 1.71.0 (2025-04-07)
 
 Full Changelog: [v1.70.0...v1.71.0](https://github.com/openai/openai-python/compare/v1.70.0...v1.71.0)

api.md

Lines changed: 85 additions & 0 deletions
@@ -259,6 +259,26 @@ Methods:
 
 - <code title="get /fine_tuning/jobs/{fine_tuning_job_id}/checkpoints">client.fine_tuning.jobs.checkpoints.<a href="./src/openai/resources/fine_tuning/jobs/checkpoints.py">list</a>(fine_tuning_job_id, \*\*<a href="src/openai/types/fine_tuning/jobs/checkpoint_list_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py">SyncCursorPage[FineTuningJobCheckpoint]</a></code>
 
+## Checkpoints
+
+### Permissions
+
+Types:
+
+```python
+from openai.types.fine_tuning.checkpoints import (
+    PermissionCreateResponse,
+    PermissionRetrieveResponse,
+    PermissionDeleteResponse,
+)
+```
+
+Methods:
+
+- <code title="post /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">create</a>(fine_tuned_model_checkpoint, \*\*<a href="src/openai/types/fine_tuning/checkpoints/permission_create_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_create_response.py">SyncPage[PermissionCreateResponse]</a></code>
+- <code title="get /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">retrieve</a>(fine_tuned_model_checkpoint, \*\*<a href="src/openai/types/fine_tuning/checkpoints/permission_retrieve_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py">PermissionRetrieveResponse</a></code>
+- <code title="delete /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">delete</a>(fine_tuned_model_checkpoint) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_delete_response.py">PermissionDeleteResponse</a></code>
+
 # VectorStores
 
 Types:
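
For orientation, a minimal sketch of how the new permissions surface might be called from the generated client. The checkpoint and project IDs are placeholders, and the `project_ids` keyword is an assumption about the params defined in `permission_create_params.py` rather than something shown in this diff:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Hypothetical fine-tuned checkpoint identifier.
checkpoint = "ft:gpt-4o-mini:my-org::example:ckpt-step-100"

# Grant access to a project; `project_ids` is assumed here, the generated
# params live in src/openai/types/fine_tuning/checkpoints/permission_create_params.py.
created = client.fine_tuning.checkpoints.permissions.create(
    fine_tuned_model_checkpoint=checkpoint,
    project_ids=["proj_abc123"],
)

# Inspect and revoke permissions using the signatures documented above.
current = client.fine_tuning.checkpoints.permissions.retrieve(checkpoint)
deleted = client.fine_tuning.checkpoints.permissions.delete(checkpoint)
print(created, current, deleted)
```
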
@@ -706,3 +726,68 @@ from openai.types.responses import ResponseItemList
 Methods:
 
 - <code title="get /responses/{response_id}/input_items">client.responses.input_items.<a href="./src/openai/resources/responses/input_items.py">list</a>(response_id, \*\*<a href="src/openai/types/responses/input_item_list_params.py">params</a>) -> <a href="./src/openai/types/responses/response_item.py">SyncCursorPage[ResponseItem]</a></code>
+
+# Evals
+
+Types:
+
+```python
+from openai.types import (
+    EvalCustomDataSourceConfig,
+    EvalLabelModelGrader,
+    EvalStoredCompletionsDataSourceConfig,
+    EvalStringCheckGrader,
+    EvalTextSimilarityGrader,
+    EvalCreateResponse,
+    EvalRetrieveResponse,
+    EvalUpdateResponse,
+    EvalListResponse,
+    EvalDeleteResponse,
+)
+```
+
+Methods:
+
+- <code title="post /evals">client.evals.<a href="./src/openai/resources/evals/evals.py">create</a>(\*\*<a href="src/openai/types/eval_create_params.py">params</a>) -> <a href="./src/openai/types/eval_create_response.py">EvalCreateResponse</a></code>
+- <code title="get /evals/{eval_id}">client.evals.<a href="./src/openai/resources/evals/evals.py">retrieve</a>(eval_id) -> <a href="./src/openai/types/eval_retrieve_response.py">EvalRetrieveResponse</a></code>
+- <code title="post /evals/{eval_id}">client.evals.<a href="./src/openai/resources/evals/evals.py">update</a>(eval_id, \*\*<a href="src/openai/types/eval_update_params.py">params</a>) -> <a href="./src/openai/types/eval_update_response.py">EvalUpdateResponse</a></code>
+- <code title="get /evals">client.evals.<a href="./src/openai/resources/evals/evals.py">list</a>(\*\*<a href="src/openai/types/eval_list_params.py">params</a>) -> <a href="./src/openai/types/eval_list_response.py">SyncCursorPage[EvalListResponse]</a></code>
+- <code title="delete /evals/{eval_id}">client.evals.<a href="./src/openai/resources/evals/evals.py">delete</a>(eval_id) -> <a href="./src/openai/types/eval_delete_response.py">EvalDeleteResponse</a></code>
+
+## Runs
+
+Types:
+
+```python
+from openai.types.evals import (
+    CreateEvalCompletionsRunDataSource,
+    CreateEvalJSONLRunDataSource,
+    EvalAPIError,
+    RunCreateResponse,
+    RunRetrieveResponse,
+    RunListResponse,
+    RunDeleteResponse,
+    RunCancelResponse,
+)
+```
+
+Methods:
+
+- <code title="post /evals/{eval_id}/runs">client.evals.runs.<a href="./src/openai/resources/evals/runs/runs.py">create</a>(eval_id, \*\*<a href="src/openai/types/evals/run_create_params.py">params</a>) -> <a href="./src/openai/types/evals/run_create_response.py">RunCreateResponse</a></code>
+- <code title="get /evals/{eval_id}/runs/{run_id}">client.evals.runs.<a href="./src/openai/resources/evals/runs/runs.py">retrieve</a>(run_id, \*, eval_id) -> <a href="./src/openai/types/evals/run_retrieve_response.py">RunRetrieveResponse</a></code>
+- <code title="get /evals/{eval_id}/runs">client.evals.runs.<a href="./src/openai/resources/evals/runs/runs.py">list</a>(eval_id, \*\*<a href="src/openai/types/evals/run_list_params.py">params</a>) -> <a href="./src/openai/types/evals/run_list_response.py">SyncCursorPage[RunListResponse]</a></code>
+- <code title="delete /evals/{eval_id}/runs/{run_id}">client.evals.runs.<a href="./src/openai/resources/evals/runs/runs.py">delete</a>(run_id, \*, eval_id) -> <a href="./src/openai/types/evals/run_delete_response.py">RunDeleteResponse</a></code>
+- <code title="post /evals/{eval_id}/runs/{run_id}">client.evals.runs.<a href="./src/openai/resources/evals/runs/runs.py">cancel</a>(run_id, \*, eval_id) -> <a href="./src/openai/types/evals/run_cancel_response.py">RunCancelResponse</a></code>
+
+### OutputItems
+
+Types:
+
+```python
+from openai.types.evals.runs import OutputItemRetrieveResponse, OutputItemListResponse
+```
+
+Methods:
+
+- <code title="get /evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}">client.evals.runs.output_items.<a href="./src/openai/resources/evals/runs/output_items.py">retrieve</a>(output_item_id, \*, eval_id, run_id) -> <a href="./src/openai/types/evals/runs/output_item_retrieve_response.py">OutputItemRetrieveResponse</a></code>
+- <code title="get /evals/{eval_id}/runs/{run_id}/output_items">client.evals.runs.output_items.<a href="./src/openai/resources/evals/runs/output_items.py">list</a>(run_id, \*, eval_id, \*\*<a href="src/openai/types/evals/runs/output_item_list_params.py">params</a>) -> <a href="./src/openai/types/evals/runs/output_item_list_response.py">SyncCursorPage[OutputItemListResponse]</a></code>

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.71.0"
+version = "1.72.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"

src/openai/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -352,6 +352,7 @@ def _reset_client() -> None:  # type: ignore[reportUnusedFunction]
     beta as beta,
     chat as chat,
     audio as audio,
+    evals as evals,
     files as files,
     images as images,
     models as models,

src/openai/_client.py

Lines changed: 9 additions & 0 deletions
@@ -36,6 +36,7 @@
 from .resources.beta import beta
 from .resources.chat import chat
 from .resources.audio import audio
+from .resources.evals import evals
 from .resources.uploads import uploads
 from .resources.responses import responses
 from .resources.fine_tuning import fine_tuning
@@ -59,6 +60,7 @@ class OpenAI(SyncAPIClient):
     batches: batches.Batches
     uploads: uploads.Uploads
     responses: responses.Responses
+    evals: evals.Evals
     with_raw_response: OpenAIWithRawResponse
     with_streaming_response: OpenAIWithStreamedResponse
 
@@ -158,6 +160,7 @@ def __init__(
         self.batches = batches.Batches(self)
         self.uploads = uploads.Uploads(self)
         self.responses = responses.Responses(self)
+        self.evals = evals.Evals(self)
         self.with_raw_response = OpenAIWithRawResponse(self)
         self.with_streaming_response = OpenAIWithStreamedResponse(self)
 
@@ -290,6 +293,7 @@ class AsyncOpenAI(AsyncAPIClient):
     batches: batches.AsyncBatches
     uploads: uploads.AsyncUploads
     responses: responses.AsyncResponses
+    evals: evals.AsyncEvals
     with_raw_response: AsyncOpenAIWithRawResponse
     with_streaming_response: AsyncOpenAIWithStreamedResponse
 
@@ -389,6 +393,7 @@ def __init__(
         self.batches = batches.AsyncBatches(self)
         self.uploads = uploads.AsyncUploads(self)
         self.responses = responses.AsyncResponses(self)
+        self.evals = evals.AsyncEvals(self)
         self.with_raw_response = AsyncOpenAIWithRawResponse(self)
         self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self)
 
@@ -522,6 +527,7 @@ def __init__(self, client: OpenAI) -> None:
         self.batches = batches.BatchesWithRawResponse(client.batches)
         self.uploads = uploads.UploadsWithRawResponse(client.uploads)
         self.responses = responses.ResponsesWithRawResponse(client.responses)
+        self.evals = evals.EvalsWithRawResponse(client.evals)
 
 
 class AsyncOpenAIWithRawResponse:
@@ -540,6 +546,7 @@ def __init__(self, client: AsyncOpenAI) -> None:
         self.batches = batches.AsyncBatchesWithRawResponse(client.batches)
         self.uploads = uploads.AsyncUploadsWithRawResponse(client.uploads)
         self.responses = responses.AsyncResponsesWithRawResponse(client.responses)
+        self.evals = evals.AsyncEvalsWithRawResponse(client.evals)
 
 
 class OpenAIWithStreamedResponse:
@@ -558,6 +565,7 @@ def __init__(self, client: OpenAI) -> None:
         self.batches = batches.BatchesWithStreamingResponse(client.batches)
         self.uploads = uploads.UploadsWithStreamingResponse(client.uploads)
         self.responses = responses.ResponsesWithStreamingResponse(client.responses)
+        self.evals = evals.EvalsWithStreamingResponse(client.evals)
 
 
 class AsyncOpenAIWithStreamedResponse:
@@ -576,6 +584,7 @@ def __init__(self, client: AsyncOpenAI) -> None:
         self.batches = batches.AsyncBatchesWithStreamingResponse(client.batches)
         self.uploads = uploads.AsyncUploadsWithStreamingResponse(client.uploads)
         self.responses = responses.AsyncResponsesWithStreamingResponse(client.responses)
+        self.evals = evals.AsyncEvalsWithStreamingResponse(client.evals)
 
 
 Client = OpenAI
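
The wiring above means the sync client, the async client, and the raw/streaming response wrappers all pick up the new resource the same way as existing ones. A brief sketch, assuming the raw-response helpers behave for `evals` as they do for other resources:

```python
from openai import OpenAI

client = OpenAI()

# Regular access through the sync client.
page = client.evals.list()

# Access through the raw-response wrapper registered above
# (OpenAIWithRawResponse.evals), which exposes headers before parsing.
raw = client.with_raw_response.evals.list()
print(raw.headers.get("x-request-id"))
parsed = raw.parse()
```
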

src/openai/_module_client.py

Lines changed: 7 additions & 0 deletions
@@ -30,6 +30,12 @@ def __load__(self) -> resources.Audio:
         return _load_client().audio
 
 
+class EvalsProxy(LazyProxy[resources.Evals]):
+    @override
+    def __load__(self) -> resources.Evals:
+        return _load_client().evals
+
+
 class ImagesProxy(LazyProxy[resources.Images]):
     @override
     def __load__(self) -> resources.Images:
@@ -94,6 +100,7 @@ def __load__(self) -> resources.VectorStores:
 beta: resources.Beta = BetaProxy().__as_proxied__()
 files: resources.Files = FilesProxy().__as_proxied__()
 audio: resources.Audio = AudioProxy().__as_proxied__()
+evals: resources.Evals = EvalsProxy().__as_proxied__()
 images: resources.Images = ImagesProxy().__as_proxied__()
 models: resources.Models = ModelsProxy().__as_proxied__()
 batches: resources.Batches = BatchesProxy().__as_proxied__()
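
Because `EvalsProxy` defers `_load_client()` until first attribute access, the module-level interface gains `openai.evals` alongside `openai.files`, `openai.audio`, and the rest. A minimal sketch:

```python
import openai

# The module-level client is configured lazily; nothing is constructed yet.
openai.api_key = "sk-..."  # placeholder; normally taken from OPENAI_API_KEY

# First use of the proxy calls _load_client() and delegates to client.evals.
page = openai.evals.list()
print(page)
```
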

src/openai/_utils/_transform.py

Lines changed: 22 additions & 0 deletions
@@ -142,6 +142,10 @@ def _maybe_transform_key(key: str, type_: type) -> str:
     return key
 
 
+def _no_transform_needed(annotation: type) -> bool:
+    return annotation == float or annotation == int
+
+
 def _transform_recursive(
     data: object,
     *,
@@ -184,6 +188,15 @@ def _transform_recursive(
         return cast(object, data)
 
     inner_type = extract_type_arg(stripped_type, 0)
+    if _no_transform_needed(inner_type):
+        # for some types there is no need to transform anything, so we can get a small
+        # perf boost from skipping that work.
+        #
+        # but we still need to convert to a list to ensure the data is json-serializable
+        if is_list(data):
+            return data
+        return list(data)
+
     return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data]
 
     if is_union_type(stripped_type):
@@ -332,6 +345,15 @@ async def _async_transform_recursive(
         return cast(object, data)
 
     inner_type = extract_type_arg(stripped_type, 0)
+    if _no_transform_needed(inner_type):
+        # for some types there is no need to transform anything, so we can get a small
+        # perf boost from skipping that work.
+        #
+        # but we still need to convert to a list to ensure the data is json-serializable
+        if is_list(data):
+            return data
+        return list(data)
+
     return [await _async_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data]
 
     if is_union_type(stripped_type):
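
The shortcut is easiest to see in isolation. The toy sketch below mirrors the list branch added above (it is an illustration, not the library's actual call path): lists of plain ints or floats carry no keys to rename, so the per-element recursion is skipped, while non-list iterables are still copied into a list so the payload stays JSON-serializable.

```python
from typing import Any, Callable, Iterable


def _no_transform_needed(annotation: type) -> bool:
    # Same predicate as the helper introduced in this diff.
    return annotation == float or annotation == int


def transform_list(data: Iterable[Any], inner_type: type, transform_one: Callable[[Any], Any]) -> list[Any]:
    if _no_transform_needed(inner_type):
        # No per-element work needed; just ensure the result is a real list
        # (tuples, generators, etc. get materialized).
        return data if isinstance(data, list) else list(data)
    # Otherwise fall back to transforming each element, as before.
    return [transform_one(d) for d in data]


# An embeddings-style payload of floats passes straight through:
assert transform_list((0.1, 0.2, 0.3), float, lambda d: d) == [0.1, 0.2, 0.3]
```
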

src/openai/_version.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.71.0" # x-release-please-version
+__version__ = "1.72.0" # x-release-please-version

src/openai/resources/__init__.py

Lines changed: 14 additions & 0 deletions
@@ -24,6 +24,14 @@
     AudioWithStreamingResponse,
     AsyncAudioWithStreamingResponse,
 )
+from .evals import (
+    Evals,
+    AsyncEvals,
+    EvalsWithRawResponse,
+    AsyncEvalsWithRawResponse,
+    EvalsWithStreamingResponse,
+    AsyncEvalsWithStreamingResponse,
+)
 from .files import (
     Files,
     AsyncFiles,
@@ -198,4 +206,10 @@
     "AsyncResponsesWithRawResponse",
     "ResponsesWithStreamingResponse",
     "AsyncResponsesWithStreamingResponse",
+    "Evals",
+    "AsyncEvals",
+    "EvalsWithRawResponse",
+    "AsyncEvalsWithRawResponse",
+    "EvalsWithStreamingResponse",
+    "AsyncEvalsWithStreamingResponse",
 ]
src/openai/resources/evals/__init__.py

Lines changed: 33 additions & 0 deletions

@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .runs import (
+    Runs,
+    AsyncRuns,
+    RunsWithRawResponse,
+    AsyncRunsWithRawResponse,
+    RunsWithStreamingResponse,
+    AsyncRunsWithStreamingResponse,
+)
+from .evals import (
+    Evals,
+    AsyncEvals,
+    EvalsWithRawResponse,
+    AsyncEvalsWithRawResponse,
+    EvalsWithStreamingResponse,
+    AsyncEvalsWithStreamingResponse,
+)
+
+__all__ = [
+    "Runs",
+    "AsyncRuns",
+    "RunsWithRawResponse",
+    "AsyncRunsWithRawResponse",
+    "RunsWithStreamingResponse",
+    "AsyncRunsWithStreamingResponse",
+    "Evals",
+    "AsyncEvals",
+    "EvalsWithRawResponse",
+    "AsyncEvalsWithRawResponse",
+    "EvalsWithStreamingResponse",
+    "AsyncEvalsWithStreamingResponse",
+]
