release: 1.31.1 #1467

Merged
merged 2 commits on Jun 5, 2024
2 changes: 1 addition & 1 deletion .release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.31.0"
+  ".": "1.31.1"
 }
8 changes: 8 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,13 @@
 # Changelog
 
+## 1.31.1 (2024-06-05)
+
+Full Changelog: [v1.31.0...v1.31.1](https://github.com/openai/openai-python/compare/v1.31.0...v1.31.1)
+
+### Chores
+
+* **internal:** minor change to tests ([#1466](https://github.com/openai/openai-python/issues/1466)) ([cb33e71](https://github.com/openai/openai-python/commit/cb33e7152f25fb16cf4c39a6e4714169c62d6af8))
+
 ## 1.31.0 (2024-06-03)
 
 Full Changelog: [v1.30.5...v1.31.0](https://github.com/openai/openai-python/compare/v1.30.5...v1.31.0)
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.31.0"
+version = "1.31.1"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
2 changes: 1 addition & 1 deletion src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.31.0"  # x-release-please-version
+__version__ = "1.31.1"  # x-release-please-version
16 changes: 8 additions & 8 deletions tests/api_resources/audio/test_speech.py
@@ -27,7 +27,7 @@ def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = client.audio.speech.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         )
         assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = client.audio.speech.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
             response_format="mp3",
             speed=0.25,
@@ -54,7 +54,7 @@ def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
 
         response = client.audio.speech.with_raw_response.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         )
 
@@ -69,7 +69,7 @@ def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         with client.audio.speech.with_streaming_response.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         ) as response:
             assert not response.is_closed
@@ -90,7 +90,7 @@ async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = await async_client.audio.speech.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         )
         assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -102,7 +102,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = await async_client.audio.speech.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
             response_format="mp3",
             speed=0.25,
@@ -117,7 +117,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
 
         response = await async_client.audio.speech.with_raw_response.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         )
 
@@ -132,7 +132,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         async with async_client.audio.speech.with_streaming_response.create(
             input="string",
-            model="string",
+            model="tts-1",
             voice="alloy",
         ) as response:
             assert not response.is_closed
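
Beyond the version stamps, this is the substantive change in the release: the speech tests stop passing the placeholder `model="string"` and use the real model name `tts-1`. A standalone sketch of that updated pattern, assuming `respx` is installed; the base URL and API key below are illustrative stand-ins for the SDK's own test fixtures:

    import httpx
    import respx

    from openai import OpenAI


    @respx.mock(base_url="http://127.0.0.1:4010")
    def test_speech_create(respx_mock: respx.MockRouter) -> None:
        # Stub the endpoint so no real request leaves the process.
        respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))

        client = OpenAI(base_url="http://127.0.0.1:4010", api_key="test-key")
        speech = client.audio.speech.create(
            input="string",
            model="tts-1",  # a real model name instead of the old "string" placeholder
            voice="alloy",
        )
        assert speech.content  # binary audio body (here, the mocked JSON bytes)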
32 changes: 16 additions & 16 deletions tests/api_resources/test_completions.py
@@ -20,15 +20,15 @@ class TestCompletions:
     @parametrize
     def test_method_create_overload_1(self, client: OpenAI) -> None:
         completion = client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         )
         assert_matches_type(Completion, completion, path=["response"])
 
     @parametrize
     def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
         completion = client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             best_of=0,
             echo=True,
@@ -52,7 +52,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
         response = client.completions.with_raw_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         )
 
@@ -64,7 +64,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
         with client.completions.with_streaming_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         ) as response:
             assert not response.is_closed
@@ -78,7 +78,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_overload_2(self, client: OpenAI) -> None:
         completion_stream = client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         )
@@ -87,7 +87,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
         completion_stream = client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
             best_of=0,
@@ -111,7 +111,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
         response = client.completions.with_raw_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         )
@@ -123,7 +123,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
         with client.completions.with_streaming_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         ) as response:
@@ -142,15 +142,15 @@ class TestAsyncCompletions:
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         completion = await async_client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         )
         assert_matches_type(Completion, completion, path=["response"])
 
     @parametrize
     async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
         completion = await async_client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             best_of=0,
             echo=True,
@@ -174,7 +174,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.completions.with_raw_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         )
 
@@ -186,7 +186,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         async with async_client.completions.with_streaming_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
         ) as response:
             assert not response.is_closed
@@ -200,7 +200,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         completion_stream = await async_client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         )
@@ -209,7 +209,7 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
         completion_stream = await async_client.completions.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
             best_of=0,
@@ -233,7 +233,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.completions.with_raw_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         )
@@ -245,7 +245,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         async with async_client.completions.with_streaming_response.create(
-            model="string",
+            model="gpt-3.5-turbo-instruct",
             prompt="This is a test.",
             stream=True,
         ) as response:
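
The completions tests get the same treatment in both the sync and async classes and across all four call styles (plain, `with_raw_response`, `with_streaming_response`, and `stream=True`): the placeholder gives way to `gpt-3.5-turbo-instruct`, a model the legacy completions endpoint actually serves. For orientation, a minimal sketch of the two call shapes these tests exercise, run against a live key (output text is model-dependent):

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # Non-streaming: a single Completion object.
    completion = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="This is a test.",
    )
    print(completion.choices[0].text)

    # Streaming: an iterator of Completion chunks.
    stream = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="This is a test.",
        stream=True,
    )
    for chunk in stream:
        print(chunk.choices[0].text, end="")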