Commit 5b7edcb

chore(internal): fix examples (#2288)
Parent: 7e8b317

6 files changed, +26 -1154 lines changed

Diff for: .stats.yml (+2 -2)

@@ -1,4 +1,4 @@
 configured_endpoints: 97
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-472fe3036ea745365257fe870c0330917fb3153705c2826f49873cd631319b0a.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-32de3bc513663c5fac922c49be41c222b6ee8c0b841d8966bcdfa489d441daa3.yml
 openapi_spec_hash: ea86343b5e9858a74e85da8ab2c532f6
-config_hash: ef19d36c307306f14f2e1cd5c834a151
+config_hash: d6c61213488683418adb860a9ee1501b

Diff for: tests/api_resources/beta/test_threads.py (+4 -4)

@@ -220,7 +220,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI)
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={"foo": "string"},
-            model="gpt-4o",
+            model="string",
             parallel_tool_calls=True,
             response_format="auto",
             stream=False,
@@ -309,7 +309,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI)
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={"foo": "string"},
-            model="gpt-4o",
+            model="string",
             parallel_tool_calls=True,
             response_format="auto",
             temperature=1,
@@ -584,7 +584,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={"foo": "string"},
-            model="gpt-4o",
+            model="string",
             parallel_tool_calls=True,
             response_format="auto",
             stream=False,
@@ -673,7 +673,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={"foo": "string"},
-            model="gpt-4o",
+            model="string",
             parallel_tool_calls=True,
             response_format="auto",
             temperature=1,
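
For context, these hunks sit inside generated tests that call beta.threads.create_and_run with every optional parameter; the change swaps the concrete model name "gpt-4o" for the generic placeholder "string". A minimal sketch of how such a call reads after the change, assuming a standard openai-python client; the assistant_id value is an illustrative placeholder, only the keyword arguments shown above come from the diff, and the placeholder values are intended for the SDK's mock test server rather than a real API request:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# create_and_run creates a thread and starts a run on it in one call.
run = client.beta.threads.create_and_run(
    assistant_id="assistant_id",  # placeholder, not taken from the diff
    max_completion_tokens=256,
    max_prompt_tokens=256,
    metadata={"foo": "string"},
    model="string",  # previously model="gpt-4o"
    parallel_tool_calls=True,
    response_format="auto",
    stream=False,
)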

Diff for: tests/api_resources/beta/threads/test_runs.py (+4 -4)

@@ -52,7 +52,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={"foo": "string"},
-            model="gpt-4o",
+            model="string",
             parallel_tool_calls=True,
             reasoning_effort="low",
             response_format="auto",
@@ -136,7 +136,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={"foo": "string"},
-            model="gpt-4o",
+            model="string",
             parallel_tool_calls=True,
             reasoning_effort="low",
             response_format="auto",
@@ -550,7 +550,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={"foo": "string"},
-            model="gpt-4o",
+            model="string",
             parallel_tool_calls=True,
             reasoning_effort="low",
             response_format="auto",
@@ -634,7 +634,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={"foo": "string"},
-            model="gpt-4o",
+            model="string",
             parallel_tool_calls=True,
             reasoning_effort="low",
             response_format="auto",
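
The runs tests get the same substitution; a comparable sketch for beta.threads.runs.create, where thread_id and assistant_id are illustrative placeholders and only the keyword arguments shown above come from the diff:

# Assumes the same client as in the earlier sketch.
run = client.beta.threads.runs.create(
    thread_id="thread_id",        # placeholder, not taken from the diff
    assistant_id="assistant_id",  # placeholder, not taken from the diff
    max_completion_tokens=256,
    max_prompt_tokens=256,
    metadata={"foo": "string"},
    model="string",  # previously model="gpt-4o"
    parallel_tool_calls=True,
    reasoning_effort="low",
    response_format="auto",
)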
