Commit 39defd6

chore(internal): fix examples (#2288)
1 parent 35262fc commit 39defd6

6 files changed: +26 -1154 lines

Diff for: .stats.yml (+2 -2)

@@ -1,4 +1,4 @@
 configured_endpoints: 97
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-472fe3036ea745365257fe870c0330917fb3153705c2826f49873cd631319b0a.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml
 openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6
-config_hash: ef19d36c307306f14f2e1cd5c834a151
+config_hash: d6c61213488683418adb860a9ee1501b

Diff for: tests/api_resources/beta/test_threads.py (+4 -4)

@@ -220,7 +220,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI)
 max_completion_tokens=256,
 max_prompt_tokens=256,
 metadata={"foo": "string"},
-model="gpt-4o",
+model="string",
 parallel_tool_calls=True,
 response_format="auto",
 stream=False,
@@ -309,7 +309,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI)
 max_completion_tokens=256,
 max_prompt_tokens=256,
 metadata={"foo": "string"},
-model="gpt-4o",
+model="string",
 parallel_tool_calls=True,
 response_format="auto",
 temperature=1,
@@ -584,7 +584,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie
 max_completion_tokens=256,
 max_prompt_tokens=256,
 metadata={"foo": "string"},
-model="gpt-4o",
+model="string",
 parallel_tool_calls=True,
 response_format="auto",
 stream=False,
@@ -673,7 +673,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie
 max_completion_tokens=256,
 max_prompt_tokens=256,
 metadata={"foo": "string"},
-model="gpt-4o",
+model="string",
 parallel_tool_calls=True,
 response_format="auto",
 temperature=1,
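
The hunks above all touch the same kind of example call on the beta Threads resource. As a rough illustration, the synchronous overload-1 case exercises a call of roughly this shape (a minimal sketch, not the generated test itself; the assistant_id value is a placeholder assumption, and only the keyword arguments visible in the diff are taken from it):

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Keyword arguments mirror the hunk at line 220; assistant_id is a
# placeholder for the endpoint's required field, not part of the changed lines.
run = client.beta.threads.create_and_run(
    assistant_id="asst_123",
    max_completion_tokens=256,
    max_prompt_tokens=256,
    metadata={"foo": "string"},
    model="string",  # the commit swaps the "gpt-4o" literal for the generic placeholder "string"
    parallel_tool_calls=True,
    response_format="auto",
    stream=False,
)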

Diff for: tests/api_resources/beta/threads/test_runs.py (+4 -4)

@@ -54,7 +54,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
 max_completion_tokens=256,
 max_prompt_tokens=256,
 metadata={"foo": "string"},
-model="gpt-4o",
+model="string",
 parallel_tool_calls=True,
 reasoning_effort="low",
 response_format="auto",
@@ -138,7 +138,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
 max_completion_tokens=256,
 max_prompt_tokens=256,
 metadata={"foo": "string"},
-model="gpt-4o",
+model="string",
 parallel_tool_calls=True,
 reasoning_effort="low",
 response_format="auto",
@@ -552,7 +552,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
 max_completion_tokens=256,
 max_prompt_tokens=256,
 metadata={"foo": "string"},
-model="gpt-4o",
+model="string",
 parallel_tool_calls=True,
 reasoning_effort="low",
 response_format="auto",
@@ -636,7 +636,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
 max_completion_tokens=256,
 max_prompt_tokens=256,
 metadata={"foo": "string"},
-model="gpt-4o",
+model="string",
 parallel_tool_calls=True,
 reasoning_effort="low",
 response_format="auto",
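
For test_runs.py the changed examples go through the runs resource instead. A minimal sketch of that call shape (the thread_id and assistant_id values are placeholder assumptions; the remaining keyword arguments mirror the first hunk above):

from openai import OpenAI

client = OpenAI()

# thread_id is the path parameter and assistant_id the required body field;
# both IDs here are made up for illustration.
run = client.beta.threads.runs.create(
    thread_id="thread_123",
    assistant_id="asst_123",
    max_completion_tokens=256,
    max_prompt_tokens=256,
    metadata={"foo": "string"},
    model="string",  # generic placeholder in place of the previous "gpt-4o"
    parallel_tool_calls=True,
    reasoning_effort="low",
    response_format="auto",
)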
