Commit d9e9d7a

chore(internal): remove redundant client test (#1085)

1 parent: 42d1ab6

1 file changed: 0 additions (+), 55 deletions (−)

Diff for: tests/test_client.py

@@ -19,7 +19,6 @@
 from openai import OpenAI, AsyncOpenAI, APIResponseValidationError
 from openai._client import OpenAI, AsyncOpenAI
 from openai._models import BaseModel, FinalRequestOptions
-from openai._response import APIResponse, AsyncAPIResponse
 from openai._constants import RAW_RESPONSE_HEADER
 from openai._streaming import Stream, AsyncStream
 from openai._exceptions import OpenAIError, APIStatusError, APITimeoutError, APIResponseValidationError
@@ -665,33 +664,6 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
         calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
         assert calculated == pytest.approx(timeout, 0.5 * 0.875)  # pyright: ignore[reportUnknownMemberType]
 
-    @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
-    @pytest.mark.respx(base_url=base_url)
-    def test_streaming_response(self) -> None:
-        response = self.client.post(
-            "/chat/completions",
-            body=dict(
-                messages=[
-                    {
-                        "role": "user",
-                        "content": "Say this is a test",
-                    }
-                ],
-                model="gpt-3.5-turbo",
-            ),
-            cast_to=APIResponse[bytes],
-            options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
-        )
-
-        assert not cast(Any, response.is_closed)
-        assert _get_open_connections(self.client) == 1
-
-        for _ in response.iter_bytes():
-            ...
-
-        assert cast(Any, response.is_closed)
-        assert _get_open_connections(self.client) == 0
-
     @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
@@ -1372,33 +1344,6 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
         calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
         assert calculated == pytest.approx(timeout, 0.5 * 0.875)  # pyright: ignore[reportUnknownMemberType]
 
-    @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
-    @pytest.mark.respx(base_url=base_url)
-    async def test_streaming_response(self) -> None:
-        response = await self.client.post(
-            "/chat/completions",
-            body=dict(
-                messages=[
-                    {
-                        "role": "user",
-                        "content": "Say this is a test",
-                    }
-                ],
-                model="gpt-3.5-turbo",
-            ),
-            cast_to=AsyncAPIResponse[bytes],
-            options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
-        )
-
-        assert not cast(Any, response.is_closed)
-        assert _get_open_connections(self.client) == 1
-
-        async for _ in response.iter_bytes():
-            ...
-
-        assert cast(Any, response.is_closed)
-        assert _get_open_connections(self.client) == 0
-
     @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
     async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
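Both removed tests checked the same behaviour: when a request is sent with the RAW_RESPONSE_HEADER set to "stream", the response body is not consumed up front, and the response reports closed once its byte stream has been fully iterated. The sketch below is a standalone reconstruction of that check, not the original test: it swaps the repository's mock-server/respx setup and the private _get_open_connections helper for an httpx.MockTransport, and the handler, API key, payload, and test name are illustrative. It assumes the openai-python version around this commit, where client.post, APIResponse[bytes], and RAW_RESPONSE_HEADER behave as shown in the diff above.

# Standalone sketch of the behaviour the removed sync test covered.
# Assumptions: openai-python as of this commit; httpx.MockTransport stands in
# for the repository's mock server and respx setup; the handler, API key, and
# payload are illustrative only.
import httpx

from openai import OpenAI
from openai._response import APIResponse
from openai._constants import RAW_RESPONSE_HEADER


def handler(request: httpx.Request) -> httpx.Response:
    def chunks():
        # Yield the body in pieces so httpx treats it as a streamed response.
        yield b"hello "
        yield b"world"

    return httpx.Response(200, content=chunks())


def test_streaming_raw_response_is_lazy() -> None:
    client = OpenAI(
        api_key="test-key",  # placeholder; no real request is made
        http_client=httpx.Client(transport=httpx.MockTransport(handler)),
    )

    response = client.post(
        "/chat/completions",
        body=dict(
            messages=[{"role": "user", "content": "Say this is a test"}],
            model="gpt-3.5-turbo",
        ),
        cast_to=APIResponse[bytes],
        options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
    )

    # The body has not been read yet, so the response is still open.
    assert not response.is_closed

    received = b"".join(response.iter_bytes())

    # Exhausting the stream closes the response.
    assert received == b"hello world"
    assert response.is_closed

The removed async variant exercised the same flow with await client.post, AsyncAPIResponse[bytes], and async for; its connection-count assertions relied on the test file's private _get_open_connections helper, which this sketch replaces with the is_closed checks.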
