diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index e5c9603757..397c4203e3 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.21.2"
+ ".": "1.22.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 2814bb7778..c9a9bfa4a8 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1 +1 @@
-configured_endpoints: 62
+configured_endpoints: 63
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 889e26f7d3..ee52ac72e0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
# Changelog

+## 1.22.0 (2024-04-18)
+
+Full Changelog: [v1.21.2...v1.22.0](https://github.com/openai/openai-python/compare/v1.21.2...v1.22.0)
+
+### Features
+
+* **api:** batch list endpoint ([#1338](https://github.com/openai/openai-python/issues/1338)) ([a776f38](https://github.com/openai/openai-python/commit/a776f387e3159f9a8f4dcaa7d0d3b78c2a884f91))
+
+
+### Chores
+
+* **internal:** ban usage of lru_cache ([#1331](https://github.com/openai/openai-python/issues/1331)) ([8f9223b](https://github.com/openai/openai-python/commit/8f9223bfe13200c685fc97c25ada3015a69c6df7))
+* **internal:** bump pyright to 1.1.359 ([#1337](https://github.com/openai/openai-python/issues/1337)) ([feec0dd](https://github.com/openai/openai-python/commit/feec0dd1dd243941a279c3224c5ca1d727d76676))
+
## 1.21.2 (2024-04-17)

Full Changelog: [v1.21.1...v1.21.2](https://github.com/openai/openai-python/compare/v1.21.1...v1.21.2)
diff --git a/api.md b/api.md
index 962ed7b7c5..30247e8f7f 100644
--- a/api.md
+++ b/api.md
@@ -405,4 +405,5 @@ Methods:
- client.batches.create(\*\*params) -> Batch
- client.batches.retrieve(batch_id) -> Batch
+- client.batches.list(\*\*params) -> SyncCursorPage[Batch]
- client.batches.cancel(batch_id) -> Batch
diff --git a/pyproject.toml b/pyproject.toml
index b593179128..17f4a86dc9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "1.21.2"
+version = "1.22.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
@@ -50,7 +50,7 @@ openai = "openai.cli:main"
managed = true
# version pins are in requirements-dev.lock
dev-dependencies = [
- "pyright",
+ "pyright>=1.1.359",
"mypy",
"respx",
"pytest",
@@ -167,7 +167,9 @@ select = [
"T201",
"T203",
# misuse of typing.TYPE_CHECKING
- "TCH004"
+ "TCH004",
+ # import rules
+ "TID251",
]
ignore = [
# mutable defaults
@@ -183,6 +185,9 @@ ignore-init-module-imports = true
[tool.ruff.format]
docstring-code-format = true

+[tool.ruff.lint.flake8-tidy-imports.banned-api]
+"functools.lru_cache".msg = "This function does not retain type information for the wrapped function's arguments; The `lru_cache` function from `_utils` should be used instead"
+
[tool.ruff.lint.isort]
length-sort = true
length-sort-straight = true
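
Note on the `TID251` addition: Ruff's flake8-tidy-imports banned-api rule now flags any direct use of `functools.lru_cache`, steering contributors to the internal wrapper instead. A minimal sketch of what the rule catches, assuming the wrapper is importable as `openai._utils.lru_cache` (the `_base_client.py` hunk below imports it from that package):

```python
# Before (now flagged by Ruff):
#   from functools import lru_cache   # TID251: banned API
#
# After (compliant; signature-preserving wrapper from _utils):
from openai._utils import lru_cache

@lru_cache(maxsize=64)
def cached_upper(value: str) -> str:
    return value.upper()

print(cached_upper("batch"))  # "BATCH"; type checkers still see (value: str) -> str
```
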
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 657e6cb810..8cfefdd93b 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -24,7 +24,7 @@ attrs==23.1.0
azure-core==1.30.1
# via azure-identity
azure-identity==1.15.0
-black==24.3.0
+black==24.4.0
# via inline-snapshot
certifi==2023.7.22
# via httpcore
@@ -109,7 +109,7 @@ portalocker==2.8.2
# via msal-extensions
py==1.11.0
# via pytest
-pycparser==2.21
+pycparser==2.22
# via cffi
pydantic==2.4.2
# via openai
@@ -117,7 +117,7 @@ pydantic-core==2.10.1
# via pydantic
pyjwt==2.8.0
# via msal
-pyright==1.1.353
+pyright==1.1.359
pytest==7.1.1
# via pytest-asyncio
pytest-asyncio==0.21.1
@@ -156,7 +156,7 @@ tqdm==4.66.1
# via openai
trio==0.22.2
types-pyaudio==0.2.16.20240106
-types-pytz==2024.1.0.20240203
+types-pytz==2024.1.0.20240417
# via pandas-stubs
types-toml==0.10.8.20240310
# via inline-snapshot
diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py
index 0bb284a211..cd8361607e 100644
--- a/src/openai/_base_client.py
+++ b/src/openai/_base_client.py
@@ -29,7 +29,6 @@
cast,
overload,
)
-from functools import lru_cache
from typing_extensions import Literal, override, get_origin

import anyio
@@ -61,7 +60,7 @@
RequestOptions,
ModelBuilderProtocol,
)
-from ._utils import is_dict, is_list, is_given, is_mapping
+from ._utils import is_dict, is_list, is_given, lru_cache, is_mapping
from ._compat import model_copy, model_dump
from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type
from ._response import (
diff --git a/src/openai/_models.py b/src/openai/_models.py
index 80ab51256f..ff3f54e2cd 100644
--- a/src/openai/_models.py
+++ b/src/openai/_models.py
@@ -4,7 +4,6 @@
import inspect
from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast
from datetime import date, datetime
-from functools import lru_cache
from typing_extensions import (
Unpack,
Literal,
@@ -37,6 +36,7 @@
PropertyInfo,
is_list,
is_given,
+ lru_cache,
is_mapping,
parse_date,
coerce_boolean,
@@ -378,7 +378,7 @@ def construct_type(*, value: object, type_: object) -> object:
# unwrap `Annotated[T, ...]` -> `T`
if is_annotated_type(type_):
- meta = get_args(type_)[1:]
+ meta: tuple[Any, ...] = get_args(type_)[1:]
type_ = extract_type_arg(type_, 0)
else:
meta = tuple()
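
The only non-import change in `_models.py` is the explicit `meta: tuple[Any, ...]` annotation, presumably needed so that the bumped pyright unifies this branch with the `meta = tuple()` fallback below it. A small standalone sketch of what this code path computes, with hypothetical metadata values:

```python
from typing import Any, get_args
from typing_extensions import Annotated

type_ = Annotated[int, "metadata-1", "metadata-2"]  # hypothetical example type

# get_args(Annotated[T, m1, m2]) returns (T, m1, m2); dropping the first
# element keeps only the metadata. The explicit annotation pins the
# variable's type so both branches of the original code agree.
meta: tuple[Any, ...] = get_args(type_)[1:]
assert meta == ("metadata-1", "metadata-2")
```
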
diff --git a/src/openai/_utils/_utils.py b/src/openai/_utils/_utils.py
index 5123a230f1..17904ce60d 100644
--- a/src/openai/_utils/_utils.py
+++ b/src/openai/_utils/_utils.py
@@ -265,6 +265,8 @@ def wrapper(*args: object, **kwargs: object) -> object:
)
msg = f"Missing required arguments; Expected either {variations} arguments to be given"
else:
+ assert len(variants) > 0
+
# TODO: this error message is not deterministic
missing = list(set(variants[0]) - given_params)
if len(missing) > 1:
@@ -395,5 +397,7 @@ def lru_cache(*, maxsize: int | None = 128) -> Callable[[CallableT], CallableT]:
"""A version of functools.lru_cache that retains the type signature
for the wrapped function arguments.
"""
- wrapper = functools.lru_cache(maxsize=maxsize)
+ wrapper = functools.lru_cache( # noqa: TID251
+ maxsize=maxsize,
+ )
return cast(Any, wrapper) # type: ignore[no-any-return]
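
The `# noqa: TID251` marks the single sanctioned call site of the banned API. The wrapper's value is purely type-level: `functools.lru_cache` returns `_lru_cache_wrapper[T]`, whose `__call__` accepts `*args: Hashable`, so the decorated function's parameter types vanish; casting the decorator back to `Callable[[CallableT], CallableT]` keeps them. A self-contained sketch of the same trick:

```python
from __future__ import annotations

import functools
from typing import Any, Callable, TypeVar, cast

CallableT = TypeVar("CallableT", bound=Callable[..., Any])

def lru_cache(*, maxsize: int | None = 128) -> Callable[[CallableT], CallableT]:
    """Like functools.lru_cache, but the decorated function keeps its signature."""
    wrapper = functools.lru_cache(maxsize=maxsize)
    return cast(Any, wrapper)

@lru_cache(maxsize=8)
def square(n: int) -> int:
    return n * n

square(3)          # ok
# square("three")  # pyright/mypy error here; with bare functools.lru_cache the
#                  # call would pass type checking and only fail at runtime
```
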
diff --git a/src/openai/_version.py b/src/openai/_version.py
index df70bd1a2c..6e11c61a18 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "1.21.2" # x-release-please-version
+__version__ = "1.22.0" # x-release-please-version
diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py
index 0921ccb194..dc311b2e12 100644
--- a/src/openai/resources/batches.py
+++ b/src/openai/resources/batches.py
@@ -8,7 +8,7 @@
import httpx

from .. import _legacy_response
-from ..types import Batch, batch_create_params
+from ..types import Batch, batch_list_params, batch_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import (
maybe_transform,
@@ -17,7 +17,9 @@
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ..pagination import SyncCursorPage, AsyncCursorPage
from .._base_client import (
+ AsyncPaginator,
make_request_options,
)
@@ -125,6 +127,58 @@ def retrieve(
cast_to=Batch,
)
+ def list(
+ self,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncCursorPage[Batch]:
+ """List your organization's batches.
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/batches",
+ page=SyncCursorPage[Batch],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ },
+ batch_list_params.BatchListParams,
+ ),
+ ),
+ model=Batch,
+ )
+
def cancel(
self,
batch_id: str,
@@ -260,6 +314,58 @@ async def retrieve(
cast_to=Batch,
)
+ def list(
+ self,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[Batch, AsyncCursorPage[Batch]]:
+ """List your organization's batches.
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/batches",
+ page=AsyncCursorPage[Batch],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ },
+ batch_list_params.BatchListParams,
+ ),
+ ),
+ model=Batch,
+ )
+
async def cancel(
self,
batch_id: str,
@@ -304,6 +410,9 @@ def __init__(self, batches: Batches) -> None:
self.retrieve = _legacy_response.to_raw_response_wrapper(
batches.retrieve,
)
+ self.list = _legacy_response.to_raw_response_wrapper(
+ batches.list,
+ )
self.cancel = _legacy_response.to_raw_response_wrapper(
batches.cancel,
)
@@ -319,6 +428,9 @@ def __init__(self, batches: AsyncBatches) -> None:
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
batches.retrieve,
)
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ batches.list,
+ )
self.cancel = _legacy_response.async_to_raw_response_wrapper(
batches.cancel,
)
@@ -334,6 +446,9 @@ def __init__(self, batches: Batches) -> None:
self.retrieve = to_streamed_response_wrapper(
batches.retrieve,
)
+ self.list = to_streamed_response_wrapper(
+ batches.list,
+ )
self.cancel = to_streamed_response_wrapper(
batches.cancel,
)
@@ -349,6 +464,9 @@ def __init__(self, batches: AsyncBatches) -> None:
self.retrieve = async_to_streamed_response_wrapper(
batches.retrieve,
)
+ self.list = async_to_streamed_response_wrapper(
+ batches.list,
+ )
self.cancel = async_to_streamed_response_wrapper(
batches.cancel,
)
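
For reviewers, a quick usage sketch of the new endpoint (assumes `OPENAI_API_KEY` is set; IDs shown are hypothetical). Iterating the returned page uses the SDK's usual auto-pagination:

```python
from openai import OpenAI

client = OpenAI()

# Iterating the page auto-paginates: the SDK keeps requesting with the
# `after` cursor until the listing is exhausted.
for batch in client.batches.list(limit=20):
    print(batch.id, batch.status)

# Manual paging via the params added above:
page = client.batches.list(limit=20)
if page.data:
    next_page = client.batches.list(after=page.data[-1].id, limit=20)

# The async client mirrors this with `async for` over AsyncCursorPage[Batch].
```
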
diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py
index b6f35cfecf..7873efb34f 100644
--- a/src/openai/types/__init__.py
+++ b/src/openai/types/__init__.py
@@ -22,6 +22,7 @@
from .images_response import ImagesResponse as ImagesResponse
from .completion_usage import CompletionUsage as CompletionUsage
from .file_list_params import FileListParams as FileListParams
+from .batch_list_params import BatchListParams as BatchListParams
from .completion_choice import CompletionChoice as CompletionChoice
from .image_edit_params import ImageEditParams as ImageEditParams
from .file_create_params import FileCreateParams as FileCreateParams
diff --git a/src/openai/types/batch_list_params.py b/src/openai/types/batch_list_params.py
new file mode 100644
index 0000000000..ef5e966b79
--- /dev/null
+++ b/src/openai/types/batch_list_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["BatchListParams"]
+
+
+class BatchListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
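
Since `BatchListParams` is declared with `total=False`, every key is optional; the resource method above feeds it through `maybe_transform`. A small sketch of how callers can type their query params (cursor value is hypothetical):

```python
from openai.types import BatchListParams

# All keys optional: either form type-checks.
params: BatchListParams = {"limit": 20}
full: BatchListParams = {"after": "batch_abc123", "limit": 50}
```
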
diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py
index aafeff8116..6f9b598e61 100644
--- a/tests/api_resources/test_batches.py
+++ b/tests/api_resources/test_batches.py
@@ -10,6 +10,7 @@
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.types import Batch
+from openai.pagination import SyncCursorPage, AsyncCursorPage

base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -102,6 +103,39 @@ def test_path_params_retrieve(self, client: OpenAI) -> None:
"",
)
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ batch = client.batches.list()
+ assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ batch = client.batches.list(
+ after="string",
+ limit=0,
+ )
+ assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.batches.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ batch = response.parse()
+ assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.batches.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ batch = response.parse()
+ assert_matches_type(SyncCursorPage[Batch], batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
@parametrize
def test_method_cancel(self, client: OpenAI) -> None:
batch = client.batches.cancel(
@@ -229,6 +263,39 @@ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
"",
)
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ batch = await async_client.batches.list()
+ assert_matches_type(AsyncCursorPage[Batch], batch, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ batch = await async_client.batches.list(
+ after="string",
+ limit=0,
+ )
+ assert_matches_type(AsyncCursorPage[Batch], batch, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.batches.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ batch = response.parse()
+ assert_matches_type(AsyncCursorPage[Batch], batch, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.batches.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ batch = await response.parse()
+ assert_matches_type(AsyncCursorPage[Batch], batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
@parametrize
async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
batch = await async_client.batches.cancel(