diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7e58412065..de70348b9c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -50,4 +50,3 @@ jobs:
 
       - name: Run tests
         run: ./scripts/test
-
diff --git a/.inline-snapshot/external/.gitignore b/.inline-snapshot/external/.gitignore
new file mode 100644
index 0000000000..45bef68be1
--- /dev/null
+++ b/.inline-snapshot/external/.gitignore
@@ -0,0 +1,2 @@
+# ignore all snapshots which are not referred to in the source
+*-new.*
diff --git a/.inline-snapshot/external/1437bd06a9d5c414e56fd0840b9cda31e5a0af80decdbddd21c056545c6d4616.bin b/.inline-snapshot/external/1437bd06a9d5c414e56fd0840b9cda31e5a0af80decdbddd21c056545c6d4616.bin
new file mode 100644
index 0000000000..f96745e385
--- /dev/null
+++ b/.inline-snapshot/external/1437bd06a9d5c414e56fd0840b9cda31e5a0af80decdbddd21c056545c6d4616.bin
@@ -0,0 +1,100 @@
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"{\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"{\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"{\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"city"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"city"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"city"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"\",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"\",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"temperature"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"temperature"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"temperature"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"64"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"68"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"64"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"units"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"units"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"units"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"f"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"f"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"f"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\"}"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{"content":"\"}"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{"content":"\"}"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":1,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":2,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC53I4IJcmm22h7tLip6Irb7b6D","object":"chat.completion.chunk","created":1722934253,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":42,"total_tokens":59}}
+
+data: [DONE]
+
diff --git a/.inline-snapshot/external/3e0df46f250db854eacb34e3258ab9141d709b770a74dc025fb8770a42aabee9.bin b/.inline-snapshot/external/3e0df46f250db854eacb34e3258ab9141d709b770a74dc025fb8770a42aabee9.bin
new file mode 100644
index 0000000000..eb1cf9e733
--- /dev/null
+++ b/.inline-snapshot/external/3e0df46f250db854eacb34e3258ab9141d709b770a74dc025fb8770a42aabee9.bin
@@ -0,0 +1,180 @@
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"{\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"location"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" CA"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"temperature"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"N"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"/A"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"conditions"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"N"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"/A"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"humidity"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"N"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"/A"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"wind"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"_speed"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"N"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"/A"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"timestamp"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"N"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"/A"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\n"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"note"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" \""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"Real"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"-time"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" data"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" is"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" not"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" available"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Please"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" check"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" reliable"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" service"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" for"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" the"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" most"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" up"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"-to"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"-date"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" information"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" on"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"'s"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" conditions"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":".\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"}"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tACAVV5BLjrmHwZhSABB78qPvLg2","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":19,"completion_tokens":86,"total_tokens":105}}
+
+data: [DONE]
+
diff --git a/.inline-snapshot/external/70c7df71ce729e178fc5e54f0cc4861e696495d9a45c19be02cf479e28c31316.bin b/.inline-snapshot/external/70c7df71ce729e178fc5e54f0cc4861e696495d9a45c19be02cf479e28c31316.bin
new file mode 100644
index 0000000000..21c41d3958
--- /dev/null
+++ b/.inline-snapshot/external/70c7df71ce729e178fc5e54f0cc4861e696495d9a45c19be02cf479e28c31316.bin
@@ -0,0 +1,12 @@
+data: {"id":"chatcmpl-9tDU7wVJ0lzoNjC1aNIjnP99zMW2C","object":"chat.completion.chunk","created":1722946903,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":{"content":[],"refusal":null},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU7wVJ0lzoNjC1aNIjnP99zMW2C","object":"chat.completion.chunk","created":1722946903,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"Foo"},"logprobs":{"content":[{"token":"Foo","logprob":-0.006764991,"bytes":[70,111,111],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU7wVJ0lzoNjC1aNIjnP99zMW2C","object":"chat.completion.chunk","created":1722946903,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"!"},"logprobs":{"content":[{"token":"!","logprob":-0.31380808,"bytes":[33],"top_logprobs":[]}],"refusal":null},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU7wVJ0lzoNjC1aNIjnP99zMW2C","object":"chat.completion.chunk","created":1722946903,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tDU7wVJ0lzoNjC1aNIjnP99zMW2C","object":"chat.completion.chunk","created":1722946903,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":9,"completion_tokens":2,"total_tokens":11}}
+
+data: [DONE]
+
diff --git a/.inline-snapshot/external/7ae6c1a2631bf7444b8f70b592ec5f581ea9de2524599f06b0d405db8997b826.bin b/.inline-snapshot/external/7ae6c1a2631bf7444b8f70b592ec5f581ea9de2524599f06b0d405db8997b826.bin
new file mode 100644
index 0000000000..d261ccd0d0
--- /dev/null
+++ b/.inline-snapshot/external/7ae6c1a2631bf7444b8f70b592ec5f581ea9de2524599f06b0d405db8997b826.bin
@@ -0,0 +1,8 @@
+data: {"id":"chatcmpl-9tAC6v0rUCOp8tty9cizBsGmRcVIx","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"refusal":""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6v0rUCOp8tty9cizBsGmRcVIx","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"length"}]}
+
+data: {"id":"chatcmpl-9tAC6v0rUCOp8tty9cizBsGmRcVIx","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":1,"total_tokens":18}}
+
+data: [DONE]
+
diff --git a/.inline-snapshot/external/a346213bec7a572810bd1ffe290e08ddfad221d6632fdb200a95ca6c996238e2.bin b/.inline-snapshot/external/a346213bec7a572810bd1ffe290e08ddfad221d6632fdb200a95ca6c996238e2.bin
new file mode 100644
index 0000000000..2ceced2f1c
--- /dev/null
+++ b/.inline-snapshot/external/a346213bec7a572810bd1ffe290e08ddfad221d6632fdb200a95ca6c996238e2.bin
@@ -0,0 +1,52 @@
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"id":"call_g4Q1vRbE0CaHGOs5if8mHsBq","type":"function","function":{"name":"GetWeatherArgs","arguments":""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\"ci"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"ty\": "}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"Edinb"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"urgh"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\", \"c"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"ountry"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\": \""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"UK\", "}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"units"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\": \""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"c\"}"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"id":"call_gWj3HQxZEHnFvyJLEHIiJKBV","type":"function","function":{"name":"get_stock_price","arguments":""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"{\"ti"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"cker\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":": \"AAP"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"L\", "}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"\"exch"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"ange\":"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":" \"NA"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"SDAQ\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":1,"function":{"arguments":"}"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}
+
+data: {"id":"chatcmpl-9tAC9cJXpJZ5tGpOa9thAumwSCcmm","object":"chat.completion.chunk","created":1722934257,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":149,"completion_tokens":60,"total_tokens":209}}
+
+data: [DONE]
+
diff --git a/.inline-snapshot/external/a7097cae6a1f8dea453977a1784b7ca16b9fadc5c4551ea066d305eb1607e1c6.bin b/.inline-snapshot/external/a7097cae6a1f8dea453977a1784b7ca16b9fadc5c4551ea066d305eb1607e1c6.bin
new file mode 100644
index 0000000000..de0efe6bab
--- /dev/null
+++ b/.inline-snapshot/external/a7097cae6a1f8dea453977a1784b7ca16b9fadc5c4551ea066d305eb1607e1c6.bin
@@ -0,0 +1,28 @@
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_rQe3kzGnTr2epjx8HREg3F2a","type":"function","function":{"name":"get_weather","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"San"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" Francisco"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"state"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"CA"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}
+
+data: {"id":"chatcmpl-9tACAMQt1guB31uPOzbyivps8944W","object":"chat.completion.chunk","created":1722934258,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":48,"completion_tokens":19,"total_tokens":67}}
+
+data: [DONE]
+
diff --git a/.inline-snapshot/external/ae070a447e1ded1ad4819f7608abc40785d712248f65c8595c99879080d0eeb9.bin b/.inline-snapshot/external/ae070a447e1ded1ad4819f7608abc40785d712248f65c8595c99879080d0eeb9.bin
new file mode 100644
index 0000000000..af003a8120
--- /dev/null
+++ b/.inline-snapshot/external/ae070a447e1ded1ad4819f7608abc40785d712248f65c8595c99879080d0eeb9.bin
@@ -0,0 +1,36 @@
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_Vz6ZXciy6Y0PYfT4d9W7fYB4","type":"function","function":{"name":"GetWeatherArgs","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"Ed"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"inburgh"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"country"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"UK"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\",\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"units"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"c"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"tool_calls"}]}
+
+data: {"id":"chatcmpl-9tAC85ZjzRlx3OkdUgSGiR9aBLyL8","object":"chat.completion.chunk","created":1722934256,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":76,"completion_tokens":24,"total_tokens":100}}
+
+data: [DONE]
+
diff --git a/.inline-snapshot/external/b9d6bee9f9b8ee5bdea06cd6955521e0258cc2cef0528a17fbdadb9cc76695f0.bin b/.inline-snapshot/external/b9d6bee9f9b8ee5bdea06cd6955521e0258cc2cef0528a17fbdadb9cc76695f0.bin
new file mode 100644
index 0000000000..b4337f886a
--- /dev/null
+++ b/.inline-snapshot/external/b9d6bee9f9b8ee5bdea06cd6955521e0258cc2cef0528a17fbdadb9cc76695f0.bin
@@ -0,0 +1,72 @@
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"I'm"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" unable"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" to"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" provide"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" real"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"-time"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" updates"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" To"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" get"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" the"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" latest"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" information"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" for"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" recommend"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" checking"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" reliable"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" website"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" or"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" using"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" weather"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" app"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC1e8N6ADc0gjWIhrsjjo4gddxQ","object":"chat.completion.chunk","created":1722934249,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":14,"completion_tokens":32,"total_tokens":46}}
+
+data: [DONE]
+
diff --git a/.inline-snapshot/external/cb77dc69b6c8289a6f1e88fa24f05bd963fe093622e5bf9a95a3ebede64714bc.bin b/.inline-snapshot/external/cb77dc69b6c8289a6f1e88fa24f05bd963fe093622e5bf9a95a3ebede64714bc.bin
new file mode 100644
index 0000000000..a95f28a54b
--- /dev/null
+++ b/.inline-snapshot/external/cb77dc69b6c8289a6f1e88fa24f05bd963fe093622e5bf9a95a3ebede64714bc.bin
@@ -0,0 +1,30 @@
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"refusal":""},"logprobs":{"content":null,"refusal":[]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"I'm"},"logprobs":{"content":null,"refusal":[{"token":"I'm","logprob":-0.0010472201,"bytes":[73,39,109],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" very"},"logprobs":{"content":null,"refusal":[{"token":" very","logprob":-0.7292482,"bytes":[32,118,101,114,121],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" sorry"},"logprobs":{"content":null,"refusal":[{"token":" sorry","logprob":-5.080963e-6,"bytes":[32,115,111,114,114,121],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":","},"logprobs":{"content":null,"refusal":[{"token":",","logprob":-0.00004048445,"bytes":[44],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" but"},"logprobs":{"content":null,"refusal":[{"token":" but","logprob":-0.038046427,"bytes":[32,98,117,116],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" I"},"logprobs":{"content":null,"refusal":[{"token":" I","logprob":-0.0019351852,"bytes":[32,73],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" can't"},"logprobs":{"content":null,"refusal":[{"token":" can't","logprob":-0.008995773,"bytes":[32,99,97,110,39,116],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" assist"},"logprobs":{"content":null,"refusal":[{"token":" assist","logprob":-0.0033510819,"bytes":[32,97,115,115,105,115,116],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" with"},"logprobs":{"content":null,"refusal":[{"token":" with","logprob":-0.0036033941,"bytes":[32,119,105,116,104],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" that"},"logprobs":{"content":null,"refusal":[{"token":" that","logprob":-0.0015974608,"bytes":[32,116,104,97,116],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"."},"logprobs":{"content":null,"refusal":[{"token":".","logprob":-0.6339823,"bytes":[46],"top_logprobs":[]}]},"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tDU0lrTPa5PKPJIVzOxKYl2LTTg4","object":"chat.completion.chunk","created":1722946896,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":12,"total_tokens":29}}
+
+data: [DONE]
+
diff --git a/.inline-snapshot/external/d79326933c1586e731a8235998194b58fc759adc3685170e0a61033241d2eda5.bin b/.inline-snapshot/external/d79326933c1586e731a8235998194b58fc759adc3685170e0a61033241d2eda5.bin
new file mode 100644
index 0000000000..895e4828ef
--- /dev/null
+++ b/.inline-snapshot/external/d79326933c1586e731a8235998194b58fc759adc3685170e0a61033241d2eda5.bin
@@ -0,0 +1,32 @@
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"refusal":""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"I'm"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" very"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" sorry"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":","},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" but"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" I"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" can't"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" assist"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" with"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" that"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":" request"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"refusal":"."},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC6sKqquyhW1Ql3Jaj6KGNDLGNZ","object":"chat.completion.chunk","created":1722934254,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":13,"total_tokens":30}}
+
+data: [DONE]
+
diff --git a/.inline-snapshot/external/ea9a417d533b9adfece02608f2ca00f3a963d785c6fe78c35d60d038cd7a8ba0.bin b/.inline-snapshot/external/ea9a417d533b9adfece02608f2ca00f3a963d785c6fe78c35d60d038cd7a8ba0.bin
new file mode 100644
index 0000000000..869b94de1a
--- /dev/null
+++ b/.inline-snapshot/external/ea9a417d533b9adfece02608f2ca00f3a963d785c6fe78c35d60d038cd7a8ba0.bin
@@ -0,0 +1,36 @@
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"{\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"city"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"San"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":" Francisco"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"temperature"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"63"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":",\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"units"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\":\""},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"f"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"content":"\"}"},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv","object":"chat.completion.chunk","created":1722934250,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":17,"completion_tokens":14,"total_tokens":31}}
+
+data: [DONE]
+
diff --git a/.inline-snapshot/external/fb75060ede89cac360ce8baf1513a82f959a175ec05ce3c07412bbc9fd436234.bin b/.inline-snapshot/external/fb75060ede89cac360ce8baf1513a82f959a175ec05ce3c07412bbc9fd436234.bin
new file mode 100644
index 0000000000..970b1adf80
--- /dev/null
+++ b/.inline-snapshot/external/fb75060ede89cac360ce8baf1513a82f959a175ec05ce3c07412bbc9fd436234.bin
@@ -0,0 +1,22 @@
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"role":"assistant","content":null,"tool_calls":[{"index":0,"id":"call_9rqjEc1DQRADTYGVV45LbZwL","type":"function","function":{"name":"get_weather","arguments":""}}],"refusal":null},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"{\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"city"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\":\""}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"New"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" York"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":" City"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{"tool_calls":[{"index":0,"function":{"arguments":"\"}"}}]},"logprobs":null,"finish_reason":null}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+data: {"id":"chatcmpl-9tACC99384oWbk9upfFD1gITJehjE","object":"chat.completion.chunk","created":1722934260,"model":"gpt-4o-so","system_fingerprint":"fp_e1a05a1dce","choices":[],"usage":{"prompt_tokens":44,"completion_tokens":16,"total_tokens":60}}
+
+data: [DONE]
+
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 4d14a67e1c..0c37ae42ca 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.39.0"
+  ".": "1.40.0"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 6cc7757636..da26758316 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b04761ffd2adad3cc19a6dc6fc696ac445878219972f891881a967340fa9a6b0.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-c36d30a94622922f83d56a025cdf0095ff7cb18a5138838c698c8443f21fb3a8.yml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b9cc30e307..2454a9a6cc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
 # Changelog
 
+## 1.40.0 (2024-08-06)
+
+Full Changelog: [v1.39.0...v1.40.0](https://github.com/openai/openai-python/compare/v1.39.0...v1.40.0)
+
+### Features
+
+* **api:** add structured outputs support ([e8dba7d](https://github.com/openai/openai-python/commit/e8dba7d0e08a7d0de5952be716e0efe9ae373759))
+
+
+### Chores
+
+* **internal:** bump ruff version ([#1604](https://github.com/openai/openai-python/issues/1604)) ([3e19a87](https://github.com/openai/openai-python/commit/3e19a87255d8e92716689656afaa3f16297773b6))
+* **internal:** update pydantic compat helper function ([#1607](https://github.com/openai/openai-python/issues/1607)) ([973c18b](https://github.com/openai/openai-python/commit/973c18b259a0e4a8134223f50a5f660b86650949))
+
 ## 1.39.0 (2024-08-05)
 
 Full Changelog: [v1.38.0...v1.39.0](https://github.com/openai/openai-python/compare/v1.38.0...v1.39.0)
diff --git a/api.md b/api.md
index 85e81467dc..1687476d86 100644
--- a/api.md
+++ b/api.md
@@ -1,7 +1,14 @@
 # Shared Types
 
 ```python
-from openai.types import ErrorObject, FunctionDefinition, FunctionParameters
+from openai.types import (
+    ErrorObject,
+    FunctionDefinition,
+    FunctionParameters,
+    ResponseFormatJSONObject,
+    ResponseFormatJSONSchema,
+    ResponseFormatText,
+)
 ```
 
 # Completions
@@ -35,6 +42,7 @@ from openai.types.chat import (
     ChatCompletionChunk,
     ChatCompletionContentPart,
     ChatCompletionContentPartImage,
+    ChatCompletionContentPartRefusal,
     ChatCompletionContentPartText,
     ChatCompletionFunctionCallOption,
     ChatCompletionFunctionMessageParam,
@@ -296,7 +304,6 @@ Types:
 
 ```python
 from openai.types.beta import (
-    AssistantResponseFormat,
     AssistantResponseFormatOption,
     AssistantToolChoice,
     AssistantToolChoiceFunction,
@@ -397,6 +404,8 @@ from openai.types.beta.threads import (
     MessageDeleted,
     MessageDelta,
     MessageDeltaEvent,
+    RefusalContentBlock,
+    RefusalDeltaBlock,
     Text,
     TextContentBlock,
     TextContentBlockParam,
diff --git a/examples/parsing.py b/examples/parsing.py
new file mode 100644
index 0000000000..17e5db52ec
--- /dev/null
+++ b/examples/parsing.py
@@ -0,0 +1,36 @@
+from typing import List
+
+import rich
+from pydantic import BaseModel
+
+from openai import OpenAI
+
+
+class Step(BaseModel):
+    explanation: str
+    output: str
+
+
+class MathResponse(BaseModel):
+    steps: List[Step]
+    final_answer: str
+
+
+client = OpenAI()
+
+completion = client.beta.chat.completions.parse(
+    model="gpt-4o-2024-08-06",
+    messages=[
+        {"role": "system", "content": "You are a helpful math tutor."},
+        {"role": "user", "content": "solve 8x + 31 = 2"},
+    ],
+    response_format=MathResponse,
+)
+
+message = completion.choices[0].message
+if message.parsed:
+    rich.print(message.parsed.steps)
+
+    print("answer: ", message.parsed.final_answer)
+else:
+    print(message.refusal)
diff --git a/examples/parsing_stream.py b/examples/parsing_stream.py
new file mode 100644
index 0000000000..6c6f078f77
--- /dev/null
+++ b/examples/parsing_stream.py
@@ -0,0 +1,42 @@
+from typing import List
+
+import rich
+from pydantic import BaseModel
+
+from openai import OpenAI
+
+
+class Step(BaseModel):
+    explanation: str
+    output: str
+
+
+class MathResponse(BaseModel):
+    steps: List[Step]
+    final_answer: str
+
+
+client = OpenAI()
+
+with client.beta.chat.completions.stream(
+    model="gpt-4o-2024-08-06",
+    messages=[
+        {"role": "system", "content": "You are a helpful math tutor."},
+        {"role": "user", "content": "solve 8x + 31 = 2"},
+    ],
+    response_format=MathResponse,
+) as stream:
+    for event in stream:
+        if event.type == "content.delta":
+            print(event.delta, end="", flush=True)
+        elif event.type == "content.done":
+            print("\n")
+            if event.parsed is not None:
+                print(f"answer: {event.parsed.final_answer}")
+        elif event.type == "refusal.delta":
+            print(event.delta, end="", flush=True)
+        elif event.type == "refusal.done":
+            print()
+
+print("---------------")
+rich.print(stream.get_final_completion())
diff --git a/examples/parsing_tools.py b/examples/parsing_tools.py
new file mode 100644
index 0000000000..c6065eeb7a
--- /dev/null
+++ b/examples/parsing_tools.py
@@ -0,0 +1,80 @@
+from enum import Enum
+from typing import List, Union
+
+import rich
+from pydantic import BaseModel
+
+import openai
+from openai import OpenAI
+
+
+class Table(str, Enum):
+    orders = "orders"
+    customers = "customers"
+    products = "products"
+
+
+class Column(str, Enum):
+    id = "id"
+    status = "status"
+    expected_delivery_date = "expected_delivery_date"
+    delivered_at = "delivered_at"
+    shipped_at = "shipped_at"
+    ordered_at = "ordered_at"
+    canceled_at = "canceled_at"
+
+
+class Operator(str, Enum):
+    eq = "="
+    gt = ">"
+    lt = "<"
+    le = "<="
+    ge = ">="
+    ne = "!="
+
+
+class OrderBy(str, Enum):
+    asc = "asc"
+    desc = "desc"
+
+
+class DynamicValue(BaseModel):
+    column_name: str
+
+
+class Condition(BaseModel):
+    column: str
+    operator: Operator
+    value: Union[str, int, DynamicValue]
+
+
+class Query(BaseModel):
+    table_name: Table
+    columns: List[Column]
+    conditions: List[Condition]
+    order_by: OrderBy
+
+
+client = OpenAI()
+
+completion = client.beta.chat.completions.parse(
+    model="gpt-4o-2024-08-06",
+    messages=[
+        {
+            "role": "system",
+            "content": "You are a helpful assistant. The current date is August 6, 2024. You help users query for the data they are looking for by calling the query function.",
+        },
+        {
+            "role": "user",
+            "content": "look up all my orders in november of last year that were fulfilled but not delivered on time",
+        },
+    ],
+    tools=[
+        openai.pydantic_function_tool(Query),
+    ],
+)
+
+tool_call = (completion.choices[0].message.tool_calls or [])[0]
+rich.print(tool_call.function)
+assert isinstance(tool_call.function.parsed_arguments, Query)
+print(tool_call.function.parsed_arguments.table_name)
diff --git a/examples/parsing_tools_stream.py b/examples/parsing_tools_stream.py
new file mode 100644
index 0000000000..eea6f6a43a
--- /dev/null
+++ b/examples/parsing_tools_stream.py
@@ -0,0 +1,38 @@
+from __future__ import annotations
+
+import rich
+from pydantic import BaseModel
+
+import openai
+from openai import OpenAI
+
+
+class GetWeather(BaseModel):
+    city: str
+    country: str
+
+
+client = OpenAI()
+
+
+with client.beta.chat.completions.stream(
+    model="gpt-4o-2024-08-06",
+    messages=[
+        {
+            "role": "user",
+            "content": "What's the weather like in SF and New York?",
+        },
+    ],
+    tools=[
+        # because we're using `.stream()` with `pydantic_function_tool()`, the returned tool calls
+        # will be automatically deserialized into this `GetWeather` type
+        openai.pydantic_function_tool(GetWeather, name="get_weather"),
+    ],
+    parallel_tool_calls=True,
+) as stream:
+    for event in stream:
+        if event.type == "tool_calls.function.arguments.delta" or event.type == "tool_calls.function.arguments.done":
+            rich.get_console().print(event, width=80)
+
+print("----\n")
+rich.print(stream.get_final_completion())
diff --git a/helpers.md b/helpers.md
index 3508b59a33..2e0d314b50 100644
--- a/helpers.md
+++ b/helpers.md
@@ -1,6 +1,280 @@
+# Structured Outputs Parsing Helpers
+
+The OpenAI API supports extracting JSON from the model with the `response_format` request param; for more details on the API, see [this guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+The SDK provides a `client.beta.chat.completions.parse()` method, a wrapper over `client.chat.completions.create()` that
+provides richer integrations with Python-specific types & returns a `ParsedChatCompletion` object, a subclass of the standard `ChatCompletion` class.
+
+## Auto-parsing response content with Pydantic models
+
+You can pass a pydantic model to the `.parse()` method and the SDK will automatically convert the model
+into a JSON schema, send it to the API, and parse the response content back into the given model.
+
+```py
+from typing import List
+from pydantic import BaseModel
+from openai import OpenAI
+
+class Step(BaseModel):
+    explanation: str
+    output: str
+
+class MathResponse(BaseModel):
+    steps: List[Step]
+    final_answer: str
+
+client = OpenAI()
+completion = client.beta.chat.completions.parse(
+    model="gpt-4o-2024-08-06",
+    messages=[
+        {"role": "system", "content": "You are a helpful math tutor."},
+        {"role": "user", "content": "solve 8x + 31 = 2"},
+    ],
+    response_format=MathResponse,
+)
+
+message = completion.choices[0].message
+if message.parsed:
+    print(message.parsed.steps)
+    print("answer: ", message.parsed.final_answer)
+else:
+    print(message.refusal)
+```
+
+## Auto-parsing function tool calls
+
+The `.parse()` method will also automatically parse `function` tool calls if:
+- You use the `openai.pydantic_function_tool()` helper method
+- You mark your tool schema with `"strict": True`
+
+For example:
+
+```py
+from enum import Enum
+from typing import List, Union
+from pydantic import BaseModel
+import openai
+
+class Table(str, Enum):
+    orders = "orders"
+    customers = "customers"
+    products = "products"
+
+class Column(str, Enum):
+    id = "id"
+    status = "status"
+    expected_delivery_date = "expected_delivery_date"
+    delivered_at = "delivered_at"
+    shipped_at = "shipped_at"
+    ordered_at = "ordered_at"
+    canceled_at = "canceled_at"
+
+class Operator(str, Enum):
+    eq = "="
+    gt = ">"
+    lt = "<"
+    le = "<="
+    ge = ">="
+    ne = "!="
+
+class OrderBy(str, Enum):
+    asc = "asc"
+    desc = "desc"
+
+class DynamicValue(BaseModel):
+    column_name: str
+
+class Condition(BaseModel):
+    column: str
+    operator: Operator
+    value: Union[str, int, DynamicValue]
+
+class Query(BaseModel):
+    table_name: Table
+    columns: List[Column]
+    conditions: List[Condition]
+    order_by: OrderBy
+
+client = openai.OpenAI()
+completion = client.beta.chat.completions.parse(
+    model="gpt-4o-2024-08-06",
+    messages=[
+        {
+            "role": "system",
+            "content": "You are a helpful assistant. The current date is August 6, 2024. You help users query for the data they are looking for by calling the query function.",
+        },
+        {
+            "role": "user",
+            "content": "look up all my orders in may of last year that were fulfilled but not delivered on time",
+        },
+    ],
+    tools=[
+        openai.pydantic_function_tool(Query),
+    ],
+)
+
+tool_call = (completion.choices[0].message.tool_calls or [])[0]
+print(tool_call.function)
+assert isinstance(tool_call.function.parsed_arguments, Query)
+print(tool_call.function.parsed_arguments.table_name)
+```
+
+### Differences from `.create()`
+
+The `beta.chat.completions.parse()` method imposes some additional restrictions on its usage that `chat.completions.create()` does not:
+
+- If the completion finishes with `finish_reason` set to `length` or `content_filter`, a `LengthFinishReasonError` / `ContentFilterFinishReasonError` is raised (a minimal handling sketch follows below).
+- Only strict function tools can be passed, e.g. `{'type': 'function', 'function': {..., 'strict': True}}`.
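+
+Both error types are importable from the top-level `openai` package. Below is a minimal handling sketch; the `Answer` model and the `max_tokens` value are illustrative placeholders rather than part of the API:
+
+```py
+from pydantic import BaseModel
+
+from openai import OpenAI, LengthFinishReasonError, ContentFilterFinishReasonError
+
+
+class Answer(BaseModel):
+    final_answer: str
+
+
+client = OpenAI()
+
+try:
+    completion = client.beta.chat.completions.parse(
+        model="gpt-4o-2024-08-06",
+        messages=[{"role": "user", "content": "solve 8x + 31 = 2"}],
+        response_format=Answer,
+        max_tokens=50,
+    )
+except LengthFinishReasonError:
+    # raised when `finish_reason` is `length`, i.e. the output was cut off
+    # before the model could produce complete, parseable JSON
+    print("response was truncated, retry with a larger max_tokens")
+except ContentFilterFinishReasonError:
+    # raised when `finish_reason` is `content_filter`
+    print("the request was rejected by the content filter")
+else:
+    print(completion.choices[0].message.parsed)
+```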
+
 # Streaming Helpers
 
-OpenAI supports streaming responses when interacting with the [Assistant](#assistant-streaming-api) APIs.
+OpenAI supports streaming responses when interacting with the [Chat Completions](#chat-completions-api) & [Assistant](#assistant-streaming-api) APIs.
+
+## Chat Completions API
+
+The SDK provides a `.beta.chat.completions.stream()` method that wraps the `.chat.completions.create(stream=True)` stream, providing a more granular event API & automatic accumulation of each delta.
+
+It also supports all of the aforementioned [parsing helpers](#structured-outputs-parsing-helpers).
+
+Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
+
+```py
+async with client.beta.chat.completions.stream(
+    model='gpt-4o-2024-08-06',
+    messages=[...],
+) as stream:
+    async for event in stream:
+        if event.type == 'content.delta':
+            print(event.content, flush=True, end='')
+```
+
+When the context manager is entered, a `ChatCompletionStream` / `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an iterator in the sync client and an async iterator in the async client. The full list of events yielded by the iterator is outlined [below](#chat-completions-events).
+
+When the context manager exits, the response will be closed; however, the `stream` instance is still available outside
+the context manager.
+
+### Chat Completions Events
+
+These events allow you to track the progress of the chat completion generation, access partial results, and handle different aspects of the stream separately.
+
+Below is a list of the different event types you may encounter; a short sketch that handles several of them together follows the list:
+
+#### ChunkEvent
+
+Emitted for every chunk received from the API.
+
+- `type`: `"chunk"`
+- `chunk`: The raw `ChatCompletionChunk` object received from the API
+- `snapshot`: The current accumulated state of the chat completion
+
+#### ContentDeltaEvent
+
+Emitted for every chunk containing new content.
+
+- `type`: `"content.delta"`
+- `delta`: The new content string received in this chunk
+- `snapshot`: The accumulated content so far
+- `parsed`: The partially parsed content (if applicable)
+
+#### ContentDoneEvent
+
+Emitted when the content generation is complete. May be fired multiple times if there are multiple choices.
+
+- `type`: `"content.done"`
+- `content`: The full generated content
+- `parsed`: The fully parsed content (if applicable)
+
+#### RefusalDeltaEvent
+
+Emitted when a chunk contains part of a content refusal.
+
+- `type`: `"refusal.delta"`
+- `delta`: The new refusal content string received in this chunk
+- `snapshot`: The accumulated refusal content string so far
+
+#### RefusalDoneEvent
+
+Emitted when the refusal content is complete.
+
+- `type`: `"refusal.done"`
+- `refusal`: The full refusal content
+
+#### FunctionToolCallArgumentsDeltaEvent
+
+Emitted when a chunk contains part of a function tool call's arguments.
+
+- `type`: `"tool_calls.function.arguments.delta"`
+- `name`: The name of the function being called
+- `index`: The index of the tool call
+- `arguments`: The accumulated raw JSON string of arguments
+- `parsed_arguments`: The partially parsed arguments object
+- `arguments_delta`: The new JSON string fragment received in this chunk
+
+#### FunctionToolCallArgumentsDoneEvent
+
+Emitted when a function tool call's arguments are complete.
+
+- `type`: `"tool_calls.function.arguments.done"`
+- `name`: The name of the function being called
+- `index`: The index of the tool call
+- `arguments`: The full raw JSON string of arguments
+- `parsed_arguments`: The fully parsed arguments object. If you used `openai.pydantic_function_tool()` this will be an instance of the given model.
+
+#### LogprobsContentDeltaEvent
+
+Emitted when a chunk contains new content [log probabilities](https://cookbook.openai.com/examples/using_logprobs).
+
+- `type`: `"logprobs.content.delta"`
+- `content`: A list of the new log probabilities received in this chunk
+- `snapshot`: A list of the accumulated log probabilities so far
+
+#### LogprobsContentDoneEvent
+
+Emitted when all content [log probabilities](https://cookbook.openai.com/examples/using_logprobs) have been received.
+
+- `type`: `"logprobs.content.done"`
+- `content`: The full list of token log probabilities for the content
+
+#### LogprobsRefusalDeltaEvent
+
+Emitted when a chunk contains new refusal [log probabilities](https://cookbook.openai.com/examples/using_logprobs).
+
+- `type`: `"logprobs.refusal.delta"`
+- `refusal`: A list of the new log probabilities received in this chunk
+- `snapshot`: A list of the accumulated log probabilities so far
+
+#### LogprobsRefusalDoneEvent
+
+Emitted when all refusal [log probabilities](https://cookbook.openai.com/examples/using_logprobs) have been received.
+
+- `type`: `"logprobs.refusal.done"`
+- `refusal`: The full list of token log probabilities for the refusal
+
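+As a rough illustration of how several of these events can be handled together, the following sketch uses the synchronous client; the `GetWeather` model mirrors `examples/parsing_tools_stream.py` and is illustrative only, and the handler deliberately covers just a subset of the event types listed above:
+
+```py
+import openai
+from openai import OpenAI
+from pydantic import BaseModel
+
+
+class GetWeather(BaseModel):
+    city: str
+
+
+client = OpenAI()
+
+with client.beta.chat.completions.stream(
+    model="gpt-4o-2024-08-06",
+    messages=[{"role": "user", "content": "What's the weather like in SF?"}],
+    tools=[openai.pydantic_function_tool(GetWeather, name="get_weather")],
+) as stream:
+    for event in stream:
+        if event.type == "content.delta":
+            # plain assistant text, streamed piece by piece
+            print(event.delta, end="", flush=True)
+        elif event.type == "refusal.done":
+            # `event.refusal` holds the full refusal message
+            print(event.refusal)
+        elif event.type == "tool_calls.function.arguments.done":
+            # `parsed_arguments` is a `GetWeather` instance because the tool
+            # was registered with `openai.pydantic_function_tool()`
+            print(event.name, event.parsed_arguments)
+
+print(stream.get_final_completion().choices[0].message)
+```
+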
+### Chat Completions stream methods
+
+A handful of helper methods are provided on the stream class for additional convenience:
+
+**`.get_final_completion()`**
+
+Returns the accumulated `ParsedChatCompletion` object.
+
+```py
+async with client.beta.chat.completions.stream(...) as stream:
+    ...
+
+completion = await stream.get_final_completion()
+print(completion.choices[0].message)
+```
+
+**`.until_done()`**
+
+If you want to wait for the stream to complete, you can use the `.until_done()` method.
+
+```py
+async with client.beta.chat.completions.stream(...) as stream:
+    await stream.until_done()
+    # stream is now finished
+```
 
 ## Assistant Streaming API
 
diff --git a/pyproject.toml b/pyproject.toml
index d0527bd84e..1e86c44706 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.39.0"
+version = "1.40.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
@@ -10,12 +10,13 @@ authors = [
 dependencies = [
     "httpx>=0.23.0, <1",
     "pydantic>=1.9.0, <3",
-    "typing-extensions>=4.7, <5",
+    "typing-extensions>=4.11, <5",
     "anyio>=3.5.0, <5",
     "distro>=1.7.0, <2",
     "sniffio",
     "cached-property; python_version < '3.8'",
-    "tqdm > 4"
+    "tqdm > 4",
+    "jiter>=0.4.0, <1",
 ]
 requires-python = ">= 3.7.1"
 classifiers = [
@@ -83,8 +84,8 @@ format = { chain = [
   "check:ruff",
   "typecheck",
 ]}
-"check:ruff" = "ruff ."
-"fix:ruff" = "ruff --fix ."
+"check:ruff" = "ruff check ."
+"fix:ruff" = "ruff check --fix ."
 
 typecheck = { chain = [
   "typecheck:pyright",
@@ -168,6 +169,11 @@ reportPrivateUsage = false
 line-length = 120
 output-format = "grouped"
 target-version = "py37"
+
+[tool.ruff.format]
+docstring-code-format = true
+
+[tool.ruff.lint]
 select = [
   # isort
   "I",
@@ -198,9 +204,6 @@ unfixable = [
 ]
 ignore-init-module-imports = true
 
-[tool.ruff.format]
-docstring-code-format = true
-
 [tool.ruff.lint.flake8-tidy-imports.banned-api]
 "functools.lru_cache".msg = "This function does not retain type information for the wrapped function's arguments; The `lru_cache` function from `_utils` should be used instead"
 
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 3ad6b88f68..f4797f432b 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -72,6 +72,8 @@ importlib-metadata==7.0.0
 iniconfig==2.0.0
     # via pytest
 inline-snapshot==0.10.2
+jiter==0.5.0
+    # via openai
 markdown-it-py==3.0.0
     # via rich
 mdurl==0.1.2
@@ -139,7 +141,7 @@ requests==2.31.0
 respx==0.20.2
 rich==13.7.1
     # via inline-snapshot
-ruff==0.1.9
+ruff==0.5.6
 setuptools==68.2.2
     # via nodeenv
 six==1.16.0
@@ -169,7 +171,7 @@ types-pytz==2024.1.0.20240417
 types-toml==0.10.8.20240310
     # via inline-snapshot
 types-tqdm==4.66.0.2
-typing-extensions==4.8.0
+typing-extensions==4.12.2
     # via azure-core
     # via black
     # via mypy
diff --git a/requirements.lock b/requirements.lock
index 3c3d6ae702..de632aefbd 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -30,6 +30,8 @@ httpx==0.25.2
 idna==3.4
     # via anyio
     # via httpx
+jiter==0.5.0
+    # via openai
 numpy==1.26.4
     # via openai
     # via pandas
@@ -56,7 +58,7 @@ tqdm==4.66.1
     # via openai
 types-pytz==2024.1.0.20240417
     # via pandas-stubs
-typing-extensions==4.8.0
+typing-extensions==4.12.2
     # via openai
     # via pydantic
     # via pydantic-core
diff --git a/src/openai/__init__.py b/src/openai/__init__.py
index 0e87ae9259..3c1ebb573d 100644
--- a/src/openai/__init__.py
+++ b/src/openai/__init__.py
@@ -26,8 +26,10 @@
     AuthenticationError,
     InternalServerError,
     PermissionDeniedError,
+    LengthFinishReasonError,
     UnprocessableEntityError,
     APIResponseValidationError,
+    ContentFilterFinishReasonError,
 )
 from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient
 from ._utils._logs import setup_logging as _setup_logging
@@ -55,6 +57,8 @@
     "UnprocessableEntityError",
     "RateLimitError",
     "InternalServerError",
+    "LengthFinishReasonError",
+    "ContentFilterFinishReasonError",
     "Timeout",
     "RequestOptions",
     "Client",
@@ -72,7 +76,7 @@
     "DefaultAsyncHttpxClient",
 ]
 
-from .lib import azure as _azure
+from .lib import azure as _azure, pydantic_function_tool as pydantic_function_tool
 from .version import VERSION as VERSION
 from .lib.azure import AzureOpenAI as AzureOpenAI, AsyncAzureOpenAI as AsyncAzureOpenAI
 from .lib._old_api import *
diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py
index c8fce0bea4..3388d69fab 100644
--- a/src/openai/_base_client.py
+++ b/src/openai/_base_client.py
@@ -125,16 +125,14 @@ def __init__(
         self,
         *,
         url: URL,
-    ) -> None:
-        ...
+    ) -> None: ...
 
     @overload
     def __init__(
         self,
         *,
         params: Query,
-    ) -> None:
-        ...
+    ) -> None: ...
 
     def __init__(
         self,
@@ -167,8 +165,7 @@ def has_next_page(self) -> bool:
             return False
         return self.next_page_info() is not None
 
-    def next_page_info(self) -> Optional[PageInfo]:
-        ...
+    def next_page_info(self) -> Optional[PageInfo]: ...
 
     def _get_page_items(self) -> Iterable[_T]:  # type: ignore[empty-body]
         ...
@@ -904,8 +901,7 @@ def request(
         *,
         stream: Literal[True],
         stream_cls: Type[_StreamT],
-    ) -> _StreamT:
-        ...
+    ) -> _StreamT: ...
 
     @overload
     def request(
@@ -915,8 +911,7 @@ def request(
         remaining_retries: Optional[int] = None,
         *,
         stream: Literal[False] = False,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     def request(
@@ -927,8 +922,7 @@ def request(
         *,
         stream: bool = False,
         stream_cls: Type[_StreamT] | None = None,
-    ) -> ResponseT | _StreamT:
-        ...
+    ) -> ResponseT | _StreamT: ...
 
     def request(
         self,
@@ -1172,8 +1166,7 @@ def get(
         cast_to: Type[ResponseT],
         options: RequestOptions = {},
         stream: Literal[False] = False,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     def get(
@@ -1184,8 +1177,7 @@ def get(
         options: RequestOptions = {},
         stream: Literal[True],
         stream_cls: type[_StreamT],
-    ) -> _StreamT:
-        ...
+    ) -> _StreamT: ...
 
     @overload
     def get(
@@ -1196,8 +1188,7 @@ def get(
         options: RequestOptions = {},
         stream: bool,
         stream_cls: type[_StreamT] | None = None,
-    ) -> ResponseT | _StreamT:
-        ...
+    ) -> ResponseT | _StreamT: ...
 
     def get(
         self,
@@ -1223,8 +1214,7 @@ def post(
         options: RequestOptions = {},
         files: RequestFiles | None = None,
         stream: Literal[False] = False,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     def post(
@@ -1237,8 +1227,7 @@ def post(
         files: RequestFiles | None = None,
         stream: Literal[True],
         stream_cls: type[_StreamT],
-    ) -> _StreamT:
-        ...
+    ) -> _StreamT: ...
 
     @overload
     def post(
@@ -1251,8 +1240,7 @@ def post(
         files: RequestFiles | None = None,
         stream: bool,
         stream_cls: type[_StreamT] | None = None,
-    ) -> ResponseT | _StreamT:
-        ...
+    ) -> ResponseT | _StreamT: ...
 
     def post(
         self,
@@ -1485,8 +1473,7 @@ async def request(
         *,
         stream: Literal[False] = False,
         remaining_retries: Optional[int] = None,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     async def request(
@@ -1497,8 +1484,7 @@ async def request(
         stream: Literal[True],
         stream_cls: type[_AsyncStreamT],
         remaining_retries: Optional[int] = None,
-    ) -> _AsyncStreamT:
-        ...
+    ) -> _AsyncStreamT: ...
 
     @overload
     async def request(
@@ -1509,8 +1495,7 @@ async def request(
         stream: bool,
         stream_cls: type[_AsyncStreamT] | None = None,
         remaining_retries: Optional[int] = None,
-    ) -> ResponseT | _AsyncStreamT:
-        ...
+    ) -> ResponseT | _AsyncStreamT: ...
 
     async def request(
         self,
@@ -1739,8 +1724,7 @@ async def get(
         cast_to: Type[ResponseT],
         options: RequestOptions = {},
         stream: Literal[False] = False,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     async def get(
@@ -1751,8 +1735,7 @@ async def get(
         options: RequestOptions = {},
         stream: Literal[True],
         stream_cls: type[_AsyncStreamT],
-    ) -> _AsyncStreamT:
-        ...
+    ) -> _AsyncStreamT: ...
 
     @overload
     async def get(
@@ -1763,8 +1746,7 @@ async def get(
         options: RequestOptions = {},
         stream: bool,
         stream_cls: type[_AsyncStreamT] | None = None,
-    ) -> ResponseT | _AsyncStreamT:
-        ...
+    ) -> ResponseT | _AsyncStreamT: ...
 
     async def get(
         self,
@@ -1788,8 +1770,7 @@ async def post(
         files: RequestFiles | None = None,
         options: RequestOptions = {},
         stream: Literal[False] = False,
-    ) -> ResponseT:
-        ...
+    ) -> ResponseT: ...
 
     @overload
     async def post(
@@ -1802,8 +1783,7 @@ async def post(
         options: RequestOptions = {},
         stream: Literal[True],
         stream_cls: type[_AsyncStreamT],
-    ) -> _AsyncStreamT:
-        ...
+    ) -> _AsyncStreamT: ...
 
     @overload
     async def post(
@@ -1816,8 +1796,7 @@ async def post(
         options: RequestOptions = {},
         stream: bool,
         stream_cls: type[_AsyncStreamT] | None = None,
-    ) -> ResponseT | _AsyncStreamT:
-        ...
+    ) -> ResponseT | _AsyncStreamT: ...
 
     async def post(
         self,
diff --git a/src/openai/_client.py b/src/openai/_client.py
index 8b404e234d..d3ee6cf0f1 100644
--- a/src/openai/_client.py
+++ b/src/openai/_client.py
@@ -151,7 +151,7 @@ def __init__(
     @property
     @override
     def qs(self) -> Querystring:
-        return Querystring(array_format="comma")
+        return Querystring(array_format="brackets")
 
     @property
     @override
@@ -365,7 +365,7 @@ def __init__(
     @property
     @override
     def qs(self) -> Querystring:
-        return Querystring(array_format="comma")
+        return Querystring(array_format="brackets")
 
     @property
     @override
diff --git a/src/openai/_compat.py b/src/openai/_compat.py
index c919b5adb3..c0dd8c1ee5 100644
--- a/src/openai/_compat.py
+++ b/src/openai/_compat.py
@@ -7,7 +7,7 @@
 import pydantic
 from pydantic.fields import FieldInfo
 
-from ._types import StrBytesIntFloat
+from ._types import IncEx, StrBytesIntFloat
 
 _T = TypeVar("_T")
 _ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)
@@ -133,17 +133,20 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
 def model_dump(
     model: pydantic.BaseModel,
     *,
+    exclude: IncEx = None,
     exclude_unset: bool = False,
     exclude_defaults: bool = False,
 ) -> dict[str, Any]:
     if PYDANTIC_V2:
         return model.model_dump(
+            exclude=exclude,
             exclude_unset=exclude_unset,
             exclude_defaults=exclude_defaults,
         )
     return cast(
         "dict[str, Any]",
         model.dict(  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
+            exclude=exclude,
             exclude_unset=exclude_unset,
             exclude_defaults=exclude_defaults,
         ),
@@ -156,25 +159,34 @@ def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
     return model.parse_obj(data)  # pyright: ignore[reportDeprecated]
 
 
+def model_parse_json(model: type[_ModelT], data: str | bytes) -> _ModelT:
+    if PYDANTIC_V2:
+        return model.model_validate_json(data)
+    return model.parse_raw(data)  # pyright: ignore[reportDeprecated]
+
+
+def model_json_schema(model: type[_ModelT]) -> dict[str, Any]:
+    if PYDANTIC_V2:
+        return model.model_json_schema()
+    return model.schema()  # pyright: ignore[reportDeprecated]
+
+
 # generic models
 if TYPE_CHECKING:
 
-    class GenericModel(pydantic.BaseModel):
-        ...
+    class GenericModel(pydantic.BaseModel): ...
 
 else:
     if PYDANTIC_V2:
         # there no longer needs to be a distinction in v2 but
         # we still have to create our own subclass to avoid
         # inconsistent MRO ordering errors
-        class GenericModel(pydantic.BaseModel):
-            ...
+        class GenericModel(pydantic.BaseModel): ...
 
     else:
         import pydantic.generics
 
-        class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel):
-            ...
+        class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
 
 
 # cached properties
@@ -193,26 +205,21 @@ class typed_cached_property(Generic[_T]):
         func: Callable[[Any], _T]
         attrname: str | None
 
-        def __init__(self, func: Callable[[Any], _T]) -> None:
-            ...
+        def __init__(self, func: Callable[[Any], _T]) -> None: ...
 
         @overload
-        def __get__(self, instance: None, owner: type[Any] | None = None) -> Self:
-            ...
+        def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ...
 
         @overload
-        def __get__(self, instance: object, owner: type[Any] | None = None) -> _T:
-            ...
+        def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ...
 
         def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self:
             raise NotImplementedError()
 
-        def __set_name__(self, owner: type[Any], name: str) -> None:
-            ...
+        def __set_name__(self, owner: type[Any], name: str) -> None: ...
 
         # __set__ is not defined at runtime, but @cached_property is designed to be settable
-        def __set__(self, instance: object, value: _T) -> None:
-            ...
+        def __set__(self, instance: object, value: _T) -> None: ...
 else:
     try:
         from functools import cached_property as cached_property
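`_compat.py` now threads an `exclude` argument through `model_dump` and adds `model_parse_json` / `model_json_schema`, each dispatching on the installed Pydantic major version. A minimal sketch of the intended usage, assuming a hypothetical `Point` model:

```py
# Hedged sketch; `Point` is a made-up model, the helpers come from the hunk above.
import pydantic

from openai._compat import model_dump, model_parse_json, model_json_schema


class Point(pydantic.BaseModel):
    x: int
    y: int


p = model_parse_json(Point, '{"x": 1, "y": 2}')  # v2: model_validate_json, v1: parse_raw
data = model_dump(p, exclude={"y"})              # -> {"x": 1}
schema = model_json_schema(Point)                # v2: model_json_schema, v1: schema
```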
diff --git a/src/openai/_exceptions.py b/src/openai/_exceptions.py
index f6731cfac5..f44f90b52f 100644
--- a/src/openai/_exceptions.py
+++ b/src/openai/_exceptions.py
@@ -19,6 +19,8 @@
     "UnprocessableEntityError",
     "RateLimitError",
     "InternalServerError",
+    "LengthFinishReasonError",
+    "ContentFilterFinishReasonError",
 ]
 
 
@@ -125,3 +127,17 @@ class RateLimitError(APIStatusError):
 
 class InternalServerError(APIStatusError):
     pass
+
+
+class LengthFinishReasonError(OpenAIError):
+    def __init__(self) -> None:
+        super().__init__(
+            "Could not parse response content as the length limit was reached",
+        )
+
+
+class ContentFilterFinishReasonError(OpenAIError):
+    def __init__(self) -> None:
+        super().__init__(
+            "Could not parse response content as the request was rejected by the content filter",
+        )
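The two new exceptions are raised by the parsing helpers introduced later in this diff when a choice finishes with `length` or `content_filter`. A hedged sketch of handling them around `parse_chat_completion`; the `Weather` model is hypothetical and `completion` is assumed to be a `ChatCompletion` returned by a prior request:

```py
import pydantic

from openai._types import NOT_GIVEN
from openai._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
from openai.lib._parsing import parse_chat_completion


class Weather(pydantic.BaseModel):  # hypothetical response format
    city: str
    temperature: float


def parse_or_none(completion):  # completion: a ChatCompletion from an earlier request
    try:
        return parse_chat_completion(
            response_format=Weather,
            input_tools=NOT_GIVEN,
            chat_completion=completion,
        )
    except LengthFinishReasonError:
        return None  # the model hit the token limit before emitting complete JSON
    except ContentFilterFinishReasonError:
        return None  # the response was rejected by the content filter
```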
diff --git a/src/openai/_files.py b/src/openai/_files.py
index ad7b668b4b..801a0d2928 100644
--- a/src/openai/_files.py
+++ b/src/openai/_files.py
@@ -39,13 +39,11 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
 
 
 @overload
-def to_httpx_files(files: None) -> None:
-    ...
+def to_httpx_files(files: None) -> None: ...
 
 
 @overload
-def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles:
-    ...
+def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
 
 
 def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
@@ -83,13 +81,11 @@ def _read_file_content(file: FileContent) -> HttpxFileContent:
 
 
 @overload
-async def async_to_httpx_files(files: None) -> None:
-    ...
+async def async_to_httpx_files(files: None) -> None: ...
 
 
 @overload
-async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles:
-    ...
+async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...
 
 
 async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
diff --git a/src/openai/_legacy_response.py b/src/openai/_legacy_response.py
index 66d7606a60..c42fb8b83e 100644
--- a/src/openai/_legacy_response.py
+++ b/src/openai/_legacy_response.py
@@ -92,12 +92,10 @@ def request_id(self) -> str | None:
         return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]
 
     @overload
-    def parse(self, *, to: type[_T]) -> _T:
-        ...
+    def parse(self, *, to: type[_T]) -> _T: ...
 
     @overload
-    def parse(self) -> R:
-        ...
+    def parse(self) -> R: ...
 
     def parse(self, *, to: type[_T] | None = None) -> R | _T:
         """Returns the rich python representation of this response's data.
diff --git a/src/openai/_response.py b/src/openai/_response.py
index 3bf4de4287..f9d91786f6 100644
--- a/src/openai/_response.py
+++ b/src/openai/_response.py
@@ -268,12 +268,10 @@ def request_id(self) -> str | None:
         return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]
 
     @overload
-    def parse(self, *, to: type[_T]) -> _T:
-        ...
+    def parse(self, *, to: type[_T]) -> _T: ...
 
     @overload
-    def parse(self) -> R:
-        ...
+    def parse(self) -> R: ...
 
     def parse(self, *, to: type[_T] | None = None) -> R | _T:
         """Returns the rich python representation of this response's data.
@@ -376,12 +374,10 @@ def request_id(self) -> str | None:
         return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]
 
     @overload
-    async def parse(self, *, to: type[_T]) -> _T:
-        ...
+    async def parse(self, *, to: type[_T]) -> _T: ...
 
     @overload
-    async def parse(self) -> R:
-        ...
+    async def parse(self) -> R: ...
 
     async def parse(self, *, to: type[_T] | None = None) -> R | _T:
         """Returns the rich python representation of this response's data.
diff --git a/src/openai/_types.py b/src/openai/_types.py
index de9b1dd48b..5611b2d38f 100644
--- a/src/openai/_types.py
+++ b/src/openai/_types.py
@@ -112,8 +112,7 @@ class NotGiven:
     For example:
 
     ```py
-    def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response:
-        ...
+    def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...
 
 
     get(timeout=1)  # 1s timeout
@@ -163,16 +162,14 @@ def build(
         *,
         response: Response,
         data: object,
-    ) -> _T:
-        ...
+    ) -> _T: ...
 
 
 Headers = Mapping[str, Union[str, Omit]]
 
 
 class HeadersLikeProtocol(Protocol):
-    def get(self, __key: str) -> str | None:
-        ...
+    def get(self, __key: str) -> str | None: ...
 
 
 HeadersLike = Union[Headers, HeadersLikeProtocol]
diff --git a/src/openai/_utils/_proxy.py b/src/openai/_utils/_proxy.py
index c46a62a698..ffd883e9dd 100644
--- a/src/openai/_utils/_proxy.py
+++ b/src/openai/_utils/_proxy.py
@@ -59,5 +59,4 @@ def __as_proxied__(self) -> T:
         return cast(T, self)
 
     @abstractmethod
-    def __load__(self) -> T:
-        ...
+    def __load__(self) -> T: ...
diff --git a/src/openai/_utils/_utils.py b/src/openai/_utils/_utils.py
index 34797c2905..2fc5a1c65a 100644
--- a/src/openai/_utils/_utils.py
+++ b/src/openai/_utils/_utils.py
@@ -211,20 +211,17 @@ def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]:
     Example usage:
     ```py
     @overload
-    def foo(*, a: str) -> str:
-        ...
+    def foo(*, a: str) -> str: ...
 
 
     @overload
-    def foo(*, b: bool) -> str:
-        ...
+    def foo(*, b: bool) -> str: ...
 
 
     # This enforces the same constraints that a static type checker would
     # i.e. that either a or b must be passed to the function
     @required_args(["a"], ["b"])
-    def foo(*, a: str | None = None, b: bool | None = None) -> str:
-        ...
+    def foo(*, a: str | None = None, b: bool | None = None) -> str: ...
     ```
     """
 
@@ -286,18 +283,15 @@ def wrapper(*args: object, **kwargs: object) -> object:
 
 
 @overload
-def strip_not_given(obj: None) -> None:
-    ...
+def strip_not_given(obj: None) -> None: ...
 
 
 @overload
-def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]:
-    ...
+def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ...
 
 
 @overload
-def strip_not_given(obj: object) -> object:
-    ...
+def strip_not_given(obj: object) -> object: ...
 
 
 def strip_not_given(obj: object | None) -> object:
diff --git a/src/openai/_version.py b/src/openai/_version.py
index aed8ee29b2..73cd42e5ea 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.39.0"  # x-release-please-version
+__version__ = "1.40.0"  # x-release-please-version
diff --git a/src/openai/cli/_errors.py b/src/openai/cli/_errors.py
index 2bf06070d6..7d0292dab2 100644
--- a/src/openai/cli/_errors.py
+++ b/src/openai/cli/_errors.py
@@ -8,12 +8,10 @@
 from .._exceptions import APIError, OpenAIError
 
 
-class CLIError(OpenAIError):
-    ...
+class CLIError(OpenAIError): ...
 
 
-class SilentCLIError(CLIError):
-    ...
+class SilentCLIError(CLIError): ...
 
 
 def display_error(err: CLIError | APIError | pydantic.ValidationError) -> None:
diff --git a/src/openai/lib/__init__.py b/src/openai/lib/__init__.py
new file mode 100644
index 0000000000..5c6cb782c0
--- /dev/null
+++ b/src/openai/lib/__init__.py
@@ -0,0 +1,2 @@
+from ._tools import pydantic_function_tool as pydantic_function_tool
+from ._parsing import ResponseFormatT as ResponseFormatT
diff --git a/src/openai/lib/_parsing/__init__.py b/src/openai/lib/_parsing/__init__.py
new file mode 100644
index 0000000000..4d454c3a20
--- /dev/null
+++ b/src/openai/lib/_parsing/__init__.py
@@ -0,0 +1,11 @@
+from ._completions import (
+    ResponseFormatT as ResponseFormatT,
+    has_parseable_input as has_parseable_input,
+    maybe_parse_content as maybe_parse_content,
+    validate_input_tools as validate_input_tools,
+    parse_chat_completion as parse_chat_completion,
+    get_input_tool_by_name as get_input_tool_by_name,
+    solve_response_format_t as solve_response_format_t,
+    parse_function_tool_arguments as parse_function_tool_arguments,
+    type_to_response_format_param as type_to_response_format_param,
+)
diff --git a/src/openai/lib/_parsing/_completions.py b/src/openai/lib/_parsing/_completions.py
new file mode 100644
index 0000000000..f9d1d6b351
--- /dev/null
+++ b/src/openai/lib/_parsing/_completions.py
@@ -0,0 +1,254 @@
+from __future__ import annotations
+
+import json
+from typing import TYPE_CHECKING, Any, Iterable, cast
+from typing_extensions import TypeVar, TypeGuard, assert_never
+
+import pydantic
+
+from .._tools import PydanticFunctionTool
+from ..._types import NOT_GIVEN, NotGiven
+from ..._utils import is_dict, is_given
+from ..._compat import model_parse_json
+from ..._models import construct_type_unchecked
+from .._pydantic import to_strict_json_schema
+from ...types.chat import (
+    ParsedChoice,
+    ChatCompletion,
+    ParsedFunction,
+    ParsedChatCompletion,
+    ChatCompletionMessage,
+    ParsedFunctionToolCall,
+    ChatCompletionToolParam,
+    ParsedChatCompletionMessage,
+    completion_create_params,
+)
+from ..._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
+from ...types.shared_params import FunctionDefinition
+from ...types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
+from ...types.chat.chat_completion_message_tool_call import Function
+
+ResponseFormatT = TypeVar(
+    "ResponseFormatT",
+    # if it isn't given then we don't do any parsing
+    default=None,
+)
+_default_response_format: None = None
+
+
+def validate_input_tools(
+    tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+) -> None:
+    if not is_given(tools):
+        return
+
+    for tool in tools:
+        if tool["type"] != "function":
+            raise ValueError(
+                f'Currently only `function` tool types support auto-parsing; Received `{tool["type"]}`',
+            )
+
+        strict = tool["function"].get("strict")
+        if strict is not True:
+            raise ValueError(
+                f'`{tool["function"]["name"]}` is not strict. Only `strict` function tools can be auto-parsed'
+            )
+
+
+def parse_chat_completion(
+    *,
+    response_format: type[ResponseFormatT] | completion_create_params.ResponseFormat | NotGiven,
+    input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+    chat_completion: ChatCompletion | ParsedChatCompletion[object],
+) -> ParsedChatCompletion[ResponseFormatT]:
+    if is_given(input_tools):
+        input_tools = [t for t in input_tools]
+    else:
+        input_tools = []
+
+    choices: list[ParsedChoice[ResponseFormatT]] = []
+    for choice in chat_completion.choices:
+        if choice.finish_reason == "length":
+            raise LengthFinishReasonError()
+
+        if choice.finish_reason == "content_filter":
+            raise ContentFilterFinishReasonError()
+
+        message = choice.message
+
+        tool_calls: list[ParsedFunctionToolCall] = []
+        if message.tool_calls:
+            for tool_call in message.tool_calls:
+                if tool_call.type == "function":
+                    tool_call_dict = tool_call.to_dict()
+                    tool_calls.append(
+                        construct_type_unchecked(
+                            value={
+                                **tool_call_dict,
+                                "function": {
+                                    **cast(Any, tool_call_dict["function"]),
+                                    "parsed_arguments": parse_function_tool_arguments(
+                                        input_tools=input_tools, function=tool_call.function
+                                    ),
+                                },
+                            },
+                            type_=ParsedFunctionToolCall,
+                        )
+                    )
+                elif TYPE_CHECKING:  # type: ignore[unreachable]
+                    assert_never(tool_call)
+                else:
+                    tool_calls.append(tool_call)
+
+        choices.append(
+            construct_type_unchecked(
+                type_=cast(Any, ParsedChoice)[solve_response_format_t(response_format)],
+                value={
+                    **choice.to_dict(),
+                    "message": {
+                        **message.to_dict(),
+                        "parsed": maybe_parse_content(
+                            response_format=response_format,
+                            message=message,
+                        ),
+                        "tool_calls": tool_calls,
+                    },
+                },
+            )
+        )
+
+    return cast(
+        ParsedChatCompletion[ResponseFormatT],
+        construct_type_unchecked(
+            type_=cast(Any, ParsedChatCompletion)[solve_response_format_t(response_format)],
+            value={
+                **chat_completion.to_dict(),
+                "choices": choices,
+            },
+        ),
+    )
+
+
+def get_input_tool_by_name(*, input_tools: list[ChatCompletionToolParam], name: str) -> ChatCompletionToolParam | None:
+    return next((t for t in input_tools if t.get("function", {}).get("name") == name), None)
+
+
+def parse_function_tool_arguments(
+    *, input_tools: list[ChatCompletionToolParam], function: Function | ParsedFunction
+) -> object:
+    input_tool = get_input_tool_by_name(input_tools=input_tools, name=function.name)
+    if not input_tool:
+        return None
+
+    input_fn = cast(object, input_tool.get("function"))
+    if isinstance(input_fn, PydanticFunctionTool):
+        return model_parse_json(input_fn.model, function.arguments)
+
+    input_fn = cast(FunctionDefinition, input_fn)
+
+    if not input_fn.get("strict"):
+        return None
+
+    return json.loads(function.arguments)
+
+
+def maybe_parse_content(
+    *,
+    response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+    message: ChatCompletionMessage | ParsedChatCompletionMessage[object],
+) -> ResponseFormatT | None:
+    if has_rich_response_format(response_format) and message.content is not None and not message.refusal:
+        return _parse_content(response_format, message.content)
+
+    return None
+
+
+def solve_response_format_t(
+    response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+) -> type[ResponseFormatT]:
+    """Return the runtime type for the given response format.
+
+    If no response format is given, or if we won't auto-parse the response format
+    then we default to `None`.
+    """
+    if has_rich_response_format(response_format):
+        return response_format
+
+    return cast("type[ResponseFormatT]", _default_response_format)
+
+
+def has_parseable_input(
+    *,
+    response_format: type | ResponseFormatParam | NotGiven,
+    input_tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+) -> bool:
+    if has_rich_response_format(response_format):
+        return True
+
+    for input_tool in input_tools or []:
+        if is_parseable_tool(input_tool):
+            return True
+
+    return False
+
+
+def has_rich_response_format(
+    response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+) -> TypeGuard[type[ResponseFormatT]]:
+    if not is_given(response_format):
+        return False
+
+    if is_response_format_param(response_format):
+        return False
+
+    return True
+
+
+def is_response_format_param(response_format: object) -> TypeGuard[ResponseFormatParam]:
+    return is_dict(response_format)
+
+
+def is_parseable_tool(input_tool: ChatCompletionToolParam) -> bool:
+    input_fn = cast(object, input_tool.get("function"))
+    if isinstance(input_fn, PydanticFunctionTool):
+        return True
+
+    return cast(FunctionDefinition, input_fn).get("strict") or False
+
+
+def is_basemodel_type(typ: type) -> TypeGuard[type[pydantic.BaseModel]]:
+    return issubclass(typ, pydantic.BaseModel)
+
+
+def _parse_content(response_format: type[ResponseFormatT], content: str) -> ResponseFormatT:
+    if is_basemodel_type(response_format):
+        return cast(ResponseFormatT, model_parse_json(response_format, content))
+
+    raise TypeError(f"Unable to automatically parse response format type {response_format}")
+
+
+def type_to_response_format_param(
+    response_format: type | completion_create_params.ResponseFormat | NotGiven,
+) -> ResponseFormatParam | NotGiven:
+    if not is_given(response_format):
+        return NOT_GIVEN
+
+    if is_response_format_param(response_format):
+        return response_format
+
+    # type checkers don't narrow the negation of a `TypeGuard` as it isn't
+    # a safe default behaviour but we know that at this point the `response_format`
+    # can only be a `type`
+    response_format = cast(type, response_format)
+
+    if not is_basemodel_type(response_format):
+        raise TypeError(f"Unsupported response_format type - {response_format}")
+
+    return {
+        "type": "json_schema",
+        "json_schema": {
+            "schema": to_strict_json_schema(response_format),
+            "name": response_format.__name__,
+            "strict": True,
+        },
+    }
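`type_to_response_format_param` turns a Pydantic class into the strict `json_schema` response-format payload. A small hedged sketch of the output shape, based on the implementation above (the `Weather` model is made up):

```py
import pydantic

from openai.lib._parsing import type_to_response_format_param


class Weather(pydantic.BaseModel):
    city: str
    temperature: float


param = type_to_response_format_param(Weather)
# Roughly:
# {
#     "type": "json_schema",
#     "json_schema": {
#         "name": "Weather",
#         "strict": True,
#         "schema": {...},  # strict JSON schema with additionalProperties: False
#     },
# }
```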
diff --git a/src/openai/lib/_pydantic.py b/src/openai/lib/_pydantic.py
new file mode 100644
index 0000000000..967ad5de57
--- /dev/null
+++ b/src/openai/lib/_pydantic.py
@@ -0,0 +1,71 @@
+from __future__ import annotations
+
+from typing import Any
+from typing_extensions import TypeGuard
+
+import pydantic
+
+from .._utils import is_dict as _is_dict, is_list
+from .._compat import model_json_schema
+
+
+def to_strict_json_schema(model: type[pydantic.BaseModel]) -> dict[str, Any]:
+    return _ensure_strict_json_schema(model_json_schema(model), path=())
+
+
+def _ensure_strict_json_schema(
+    json_schema: object,
+    path: tuple[str, ...],
+) -> dict[str, Any]:
+    """Mutates the given JSON schema to ensure it conforms to the `strict` standard
+    that the API expects.
+    """
+    if not is_dict(json_schema):
+        raise TypeError(f"Expected {json_schema} to be a dictionary; path={path}")
+
+    typ = json_schema.get("type")
+    if typ == "object" and "additionalProperties" not in json_schema:
+        json_schema["additionalProperties"] = False
+
+    # object types
+    # { 'type': 'object', 'properties': { 'a':  {...} } }
+    properties = json_schema.get("properties")
+    if is_dict(properties):
+        json_schema["required"] = [prop for prop in properties.keys()]
+        json_schema["properties"] = {
+            key: _ensure_strict_json_schema(prop_schema, path=(*path, "properties", key))
+            for key, prop_schema in properties.items()
+        }
+
+    # arrays
+    # { 'type': 'array', 'items': {...} }
+    items = json_schema.get("items")
+    if is_dict(items):
+        json_schema["items"] = _ensure_strict_json_schema(items, path=(*path, "items"))
+
+    # unions
+    any_of = json_schema.get("anyOf")
+    if is_list(any_of):
+        json_schema["anyOf"] = [
+            _ensure_strict_json_schema(variant, path=(*path, "anyOf", str(i))) for i, variant in enumerate(any_of)
+        ]
+
+    # intersections
+    all_of = json_schema.get("allOf")
+    if is_list(all_of):
+        json_schema["allOf"] = [
+            _ensure_strict_json_schema(entry, path=(*path, "allOf", str(i))) for i, entry in enumerate(all_of)
+        ]
+
+    defs = json_schema.get("$defs")
+    if is_dict(defs):
+        for def_name, def_schema in defs.items():
+            _ensure_strict_json_schema(def_schema, path=(*path, "$defs", def_name))
+
+    return json_schema
+
+
+def is_dict(obj: object) -> TypeGuard[dict[str, object]]:
+    # just pretend that we know there are only `str` keys
+    # as that check is not worth the performance cost
+    return _is_dict(obj)
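`to_strict_json_schema` rewrites a model's schema so that every object forbids extra keys and lists all of its properties as required. A minimal sketch on a hypothetical model (exact schema details vary by Pydantic version):

```py
from typing import Optional

import pydantic

from openai.lib._pydantic import to_strict_json_schema


class Address(pydantic.BaseModel):
    street: str
    zip_code: Optional[str] = None


schema = to_strict_json_schema(Address)
assert schema["additionalProperties"] is False
assert schema["required"] == ["street", "zip_code"]  # every property becomes required
```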
diff --git a/src/openai/lib/_tools.py b/src/openai/lib/_tools.py
new file mode 100644
index 0000000000..8478ed676c
--- /dev/null
+++ b/src/openai/lib/_tools.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from typing import Any, Dict, cast
+
+import pydantic
+
+from ._pydantic import to_strict_json_schema
+from ..types.chat import ChatCompletionToolParam
+from ..types.shared_params import FunctionDefinition
+
+
+class PydanticFunctionTool(Dict[str, Any]):
+    """Dictionary wrapper so we can pass the given base model
+    throughout the entire request stack without having to special
+    case it.
+    """
+
+    model: type[pydantic.BaseModel]
+
+    def __init__(self, defn: FunctionDefinition, model: type[pydantic.BaseModel]) -> None:
+        super().__init__(defn)
+        self.model = model
+
+    def cast(self) -> FunctionDefinition:
+        return cast(FunctionDefinition, self)
+
+
+def pydantic_function_tool(
+    model: type[pydantic.BaseModel],
+    *,
+    name: str | None = None,  # inferred from class name by default
+    description: str | None = None,  # inferred from class docstring by default
+) -> ChatCompletionToolParam:
+    if description is None:
+        # note: we intentionally don't use `.getdoc()` to avoid
+        # including pydantic's docstrings
+        description = model.__doc__
+
+    function = PydanticFunctionTool(
+        {
+            "name": name or model.__name__,
+            "strict": True,
+            "parameters": to_strict_json_schema(model),
+        },
+        model,
+    ).cast()
+
+    if description is not None:
+        function["description"] = description
+
+    return {
+        "type": "function",
+        "function": function,
+    }
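`pydantic_function_tool` wraps a model as a strict function tool, defaulting the name to the class name and the description to the class docstring. A hedged sketch with a made-up `GetWeather` model:

```py
import pydantic

from openai.lib import pydantic_function_tool


class GetWeather(pydantic.BaseModel):
    """Look up the current weather for a city."""

    city: str


tool = pydantic_function_tool(GetWeather)
# Roughly:
# {
#     "type": "function",
#     "function": {
#         "name": "GetWeather",
#         "strict": True,
#         "description": "Look up the current weather for a city.",
#         "parameters": {...},  # strict JSON schema for GetWeather
#     },
# }
```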
diff --git a/src/openai/lib/azure.py b/src/openai/lib/azure.py
index 433486fded..ef64137de4 100644
--- a/src/openai/lib/azure.py
+++ b/src/openai/lib/azure.py
@@ -80,8 +80,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.Client | None = None,
         _strict_response_validation: bool = False,
-    ) -> None:
-        ...
+    ) -> None: ...
 
     @overload
     def __init__(
@@ -99,8 +98,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.Client | None = None,
         _strict_response_validation: bool = False,
-    ) -> None:
-        ...
+    ) -> None: ...
 
     @overload
     def __init__(
@@ -118,8 +116,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.Client | None = None,
         _strict_response_validation: bool = False,
-    ) -> None:
-        ...
+    ) -> None: ...
 
     def __init__(
         self,
@@ -321,8 +318,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.AsyncClient | None = None,
         _strict_response_validation: bool = False,
-    ) -> None:
-        ...
+    ) -> None: ...
 
     @overload
     def __init__(
@@ -341,8 +337,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.AsyncClient | None = None,
         _strict_response_validation: bool = False,
-    ) -> None:
-        ...
+    ) -> None: ...
 
     @overload
     def __init__(
@@ -361,8 +356,7 @@ def __init__(
         default_query: Mapping[str, object] | None = None,
         http_client: httpx.AsyncClient | None = None,
         _strict_response_validation: bool = False,
-    ) -> None:
-        ...
+    ) -> None: ...
 
     def __init__(
         self,
diff --git a/src/openai/lib/streaming/_deltas.py b/src/openai/lib/streaming/_deltas.py
new file mode 100644
index 0000000000..a5e1317612
--- /dev/null
+++ b/src/openai/lib/streaming/_deltas.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+from ..._utils import is_dict, is_list
+
+
+def accumulate_delta(acc: dict[object, object], delta: dict[object, object]) -> dict[object, object]:
+    for key, delta_value in delta.items():
+        if key not in acc:
+            acc[key] = delta_value
+            continue
+
+        acc_value = acc[key]
+        if acc_value is None:
+            acc[key] = delta_value
+            continue
+
+        # the `index` property is used in arrays of objects so it should
+        # not be accumulated like other values e.g.
+        # [{'foo': 'bar', 'index': 0}]
+        #
+        # the same applies to `type` properties as they're used for
+        # discriminated unions
+        if key == "index" or key == "type":
+            acc[key] = delta_value
+            continue
+
+        if isinstance(acc_value, str) and isinstance(delta_value, str):
+            acc_value += delta_value
+        elif isinstance(acc_value, (int, float)) and isinstance(delta_value, (int, float)):
+            acc_value += delta_value
+        elif is_dict(acc_value) and is_dict(delta_value):
+            acc_value = accumulate_delta(acc_value, delta_value)
+        elif is_list(acc_value) and is_list(delta_value):
+            # for lists of non-dictionary items we'll only ever get new entries
+            # in the array, existing entries will never be changed
+            if all(isinstance(x, (str, int, float)) for x in acc_value):
+                acc_value.extend(delta_value)
+                continue
+
+            for delta_entry in delta_value:
+                if not is_dict(delta_entry):
+                    raise TypeError(f"Unexpected list delta entry is not a dictionary: {delta_entry}")
+
+                try:
+                    index = delta_entry["index"]
+                except KeyError as exc:
+                    raise RuntimeError(f"Expected list delta entry to have an `index` key; {delta_entry}") from exc
+
+                if not isinstance(index, int):
+                    raise TypeError(f"Unexpected list delta entry `index` value is not an integer; {index}")
+
+                try:
+                    acc_entry = acc_value[index]
+                except IndexError:
+                    acc_value.insert(index, delta_entry)
+                else:
+                    if not is_dict(acc_entry):
+                        raise TypeError("not handled yet")
+
+                    acc_value[index] = accumulate_delta(acc_entry, delta_entry)
+
+        acc[key] = acc_value
+
+    return acc
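`accumulate_delta` merges streaming deltas into a running snapshot: strings and numbers accumulate, `index`/`type` keys are overwritten, and list entries are matched by their `index`. A tiny sketch mirroring those rules:

```py
from openai.lib.streaming._deltas import accumulate_delta

acc = {"content": "Hel", "tool_calls": [{"index": 0, "function": {"arguments": '{"ci'}}]}
delta = {"content": "lo", "tool_calls": [{"index": 0, "function": {"arguments": 'ty":'}}]}

acc = accumulate_delta(acc, delta)
# acc == {
#     "content": "Hello",
#     "tool_calls": [{"index": 0, "function": {"arguments": '{"city":'}}],
# }
```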
diff --git a/src/openai/lib/streaming/chat/__init__.py b/src/openai/lib/streaming/chat/__init__.py
new file mode 100644
index 0000000000..5881c39b9a
--- /dev/null
+++ b/src/openai/lib/streaming/chat/__init__.py
@@ -0,0 +1,26 @@
+from ._types import (
+    ParsedChoiceSnapshot as ParsedChoiceSnapshot,
+    ParsedChatCompletionSnapshot as ParsedChatCompletionSnapshot,
+    ParsedChatCompletionMessageSnapshot as ParsedChatCompletionMessageSnapshot,
+)
+from ._events import (
+    ChunkEvent as ChunkEvent,
+    ContentDoneEvent as ContentDoneEvent,
+    RefusalDoneEvent as RefusalDoneEvent,
+    ContentDeltaEvent as ContentDeltaEvent,
+    RefusalDeltaEvent as RefusalDeltaEvent,
+    LogprobsContentDoneEvent as LogprobsContentDoneEvent,
+    LogprobsRefusalDoneEvent as LogprobsRefusalDoneEvent,
+    ChatCompletionStreamEvent as ChatCompletionStreamEvent,
+    LogprobsContentDeltaEvent as LogprobsContentDeltaEvent,
+    LogprobsRefusalDeltaEvent as LogprobsRefusalDeltaEvent,
+    ParsedChatCompletionSnapshot as ParsedChatCompletionSnapshot,
+    FunctionToolCallArgumentsDoneEvent as FunctionToolCallArgumentsDoneEvent,
+    FunctionToolCallArgumentsDeltaEvent as FunctionToolCallArgumentsDeltaEvent,
+)
+from ._completions import (
+    ChatCompletionStream as ChatCompletionStream,
+    AsyncChatCompletionStream as AsyncChatCompletionStream,
+    ChatCompletionStreamManager as ChatCompletionStreamManager,
+    AsyncChatCompletionStreamManager as AsyncChatCompletionStreamManager,
+)
diff --git a/src/openai/lib/streaming/chat/_completions.py b/src/openai/lib/streaming/chat/_completions.py
new file mode 100644
index 0000000000..342a5e2b95
--- /dev/null
+++ b/src/openai/lib/streaming/chat/_completions.py
@@ -0,0 +1,724 @@
+from __future__ import annotations
+
+import inspect
+from types import TracebackType
+from typing import TYPE_CHECKING, Any, Generic, Callable, Iterable, Awaitable, AsyncIterator, cast
+from typing_extensions import Self, Iterator, assert_never
+
+from jiter import from_json
+
+from ._types import ParsedChoiceSnapshot, ParsedChatCompletionSnapshot, ParsedChatCompletionMessageSnapshot
+from ._events import (
+    ChunkEvent,
+    ContentDoneEvent,
+    RefusalDoneEvent,
+    ContentDeltaEvent,
+    RefusalDeltaEvent,
+    LogprobsContentDoneEvent,
+    LogprobsRefusalDoneEvent,
+    ChatCompletionStreamEvent,
+    LogprobsContentDeltaEvent,
+    LogprobsRefusalDeltaEvent,
+    FunctionToolCallArgumentsDoneEvent,
+    FunctionToolCallArgumentsDeltaEvent,
+)
+from .._deltas import accumulate_delta
+from ...._types import NOT_GIVEN, NotGiven
+from ...._utils import is_given, consume_sync_iterator, consume_async_iterator
+from ...._compat import model_dump
+from ...._models import build, construct_type
+from ..._parsing import (
+    ResponseFormatT,
+    has_parseable_input,
+    maybe_parse_content,
+    parse_chat_completion,
+    get_input_tool_by_name,
+    solve_response_format_t,
+    parse_function_tool_arguments,
+)
+from ...._streaming import Stream, AsyncStream
+from ....types.chat import ChatCompletionChunk, ParsedChatCompletion, ChatCompletionToolParam
+from ...._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
+from ....types.chat.chat_completion import ChoiceLogprobs
+from ....types.chat.chat_completion_chunk import Choice as ChoiceChunk
+from ....types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
+
+
+class ChatCompletionStream(Generic[ResponseFormatT]):
+    """Wrapper over the Chat Completions streaming API that adds helpful
+    events such as `content.done`, supports automatically parsing
+    responses & tool calls and accumulates a `ChatCompletion` object
+    from each individual chunk.
+
+    https://platform.openai.com/docs/api-reference/streaming
+    """
+
+    def __init__(
+        self,
+        *,
+        raw_stream: Stream[ChatCompletionChunk],
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+    ) -> None:
+        self._raw_stream = raw_stream
+        self._response = raw_stream.response
+        self._iterator = self.__stream__()
+        self._state = ChatCompletionStreamState(response_format=response_format, input_tools=input_tools)
+
+    def __next__(self) -> ChatCompletionStreamEvent[ResponseFormatT]:
+        return self._iterator.__next__()
+
+    def __iter__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+        for item in self._iterator:
+            yield item
+
+    def __enter__(self) -> Self:
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        self.close()
+
+    def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        self._response.close()
+
+    def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
+        """Waits until the stream has been read to completion and returns
+        the accumulated `ParsedChatCompletion` object.
+
+        If you passed a class type to `.stream()`, the `completion.choices[0].message.parsed`
+        property will be the content deserialised into that class, if there was any content returned
+        by the API.
+        """
+        self.until_done()
+        return self._state.get_final_completion()
+
+    def until_done(self) -> Self:
+        """Blocks until the stream has been consumed."""
+        consume_sync_iterator(self)
+        return self
+
+    @property
+    def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
+        return self._state.current_completion_snapshot
+
+    def __stream__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+        for sse_event in self._raw_stream:
+            events_to_fire = self._state.handle_chunk(sse_event)
+            for event in events_to_fire:
+                yield event
+
+
+class ChatCompletionStreamManager(Generic[ResponseFormatT]):
+    """Context manager over a `ChatCompletionStream` that is returned by `.stream()`.
+
+    This context manager ensures the response cannot be leaked if you don't read
+    the stream to completion.
+
+    Usage:
+    ```py
+    with client.beta.chat.completions.stream(...) as stream:
+        for event in stream:
+            ...
+    ```
+    """
+
+    def __init__(
+        self,
+        api_request: Callable[[], Stream[ChatCompletionChunk]],
+        *,
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+    ) -> None:
+        self.__stream: ChatCompletionStream[ResponseFormatT] | None = None
+        self.__api_request = api_request
+        self.__response_format = response_format
+        self.__input_tools = input_tools
+
+    def __enter__(self) -> ChatCompletionStream[ResponseFormatT]:
+        raw_stream = self.__api_request()
+
+        self.__stream = ChatCompletionStream(
+            raw_stream=raw_stream,
+            response_format=self.__response_format,
+            input_tools=self.__input_tools,
+        )
+
+        return self.__stream
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__stream is not None:
+            self.__stream.close()
+
+
+class AsyncChatCompletionStream(Generic[ResponseFormatT]):
+    """Wrapper over the Chat Completions streaming API that adds helpful
+    events such as `content.done`, supports automatically parsing
+    responses & tool calls and accumulates a `ChatCompletion` object
+    from each individual chunk.
+
+    https://platform.openai.com/docs/api-reference/streaming
+    """
+
+    def __init__(
+        self,
+        *,
+        raw_stream: AsyncStream[ChatCompletionChunk],
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+    ) -> None:
+        self._raw_stream = raw_stream
+        self._response = raw_stream.response
+        self._iterator = self.__stream__()
+        self._state = ChatCompletionStreamState(response_format=response_format, input_tools=input_tools)
+
+    async def __anext__(self) -> ChatCompletionStreamEvent[ResponseFormatT]:
+        return await self._iterator.__anext__()
+
+    async def __aiter__(self) -> AsyncIterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+        async for item in self._iterator:
+            yield item
+
+    async def __aenter__(self) -> Self:
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        await self.close()
+
+    async def close(self) -> None:
+        """
+        Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        await self._response.aclose()
+
+    async def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
+        """Waits until the stream has been read to completion and returns
+        the accumulated `ParsedChatCompletion` object.
+
+        If you passed a class type to `.stream()`, the `completion.choices[0].message.parsed`
+        property will be the content deserialised into that class, if there was any content returned
+        by the API.
+        """
+        await self.until_done()
+        return self._state.get_final_completion()
+
+    async def until_done(self) -> Self:
+        """Blocks until the stream has been consumed."""
+        await consume_async_iterator(self)
+        return self
+
+    @property
+    def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
+        return self._state.current_completion_snapshot
+
+    async def __stream__(self) -> AsyncIterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+        async for sse_event in self._raw_stream:
+            events_to_fire = self._state.handle_chunk(sse_event)
+            for event in events_to_fire:
+                yield event
+
+
+class AsyncChatCompletionStreamManager(Generic[ResponseFormatT]):
+    """Context manager over an `AsyncChatCompletionStream` that is returned by `.stream()`.
+
+    This context manager ensures the response cannot be leaked if you don't read
+    the stream to completion.
+
+    Usage:
+    ```py
+    async with client.beta.chat.completions.stream(...) as stream:
+        async for event in stream:
+            ...
+    ```
+    """
+
+    def __init__(
+        self,
+        api_request: Awaitable[AsyncStream[ChatCompletionChunk]],
+        *,
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+    ) -> None:
+        self.__stream: AsyncChatCompletionStream[ResponseFormatT] | None = None
+        self.__api_request = api_request
+        self.__response_format = response_format
+        self.__input_tools = input_tools
+
+    async def __aenter__(self) -> AsyncChatCompletionStream[ResponseFormatT]:
+        raw_stream = await self.__api_request
+
+        self.__stream = AsyncChatCompletionStream(
+            raw_stream=raw_stream,
+            response_format=self.__response_format,
+            input_tools=self.__input_tools,
+        )
+
+        return self.__stream
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        if self.__stream is not None:
+            await self.__stream.close()
+
+
+class ChatCompletionStreamState(Generic[ResponseFormatT]):
+    def __init__(
+        self,
+        *,
+        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+    ) -> None:
+        self.__current_completion_snapshot: ParsedChatCompletionSnapshot | None = None
+        self.__choice_event_states: list[ChoiceEventState] = []
+
+        self._input_tools = [tool for tool in input_tools] if is_given(input_tools) else []
+        self._response_format = response_format
+        self._rich_response_format: type | NotGiven = response_format if inspect.isclass(response_format) else NOT_GIVEN
+
+    def get_final_completion(self) -> ParsedChatCompletion[ResponseFormatT]:
+        return parse_chat_completion(
+            chat_completion=self.current_completion_snapshot,
+            response_format=self._rich_response_format,
+            input_tools=self._input_tools,
+        )
+
+    @property
+    def current_completion_snapshot(self) -> ParsedChatCompletionSnapshot:
+        assert self.__current_completion_snapshot is not None
+        return self.__current_completion_snapshot
+
+    def handle_chunk(self, chunk: ChatCompletionChunk) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
+        """Accumulates a new chunk into the snapshot and returns a list of events to yield."""
+        self.__current_completion_snapshot = self._accumulate_chunk(chunk)
+
+        return self._build_events(
+            chunk=chunk,
+            completion_snapshot=self.__current_completion_snapshot,
+        )
+
+    def _get_choice_state(self, choice: ChoiceChunk) -> ChoiceEventState:
+        try:
+            return self.__choice_event_states[choice.index]
+        except IndexError:
+            choice_state = ChoiceEventState(input_tools=self._input_tools)
+            self.__choice_event_states.append(choice_state)
+            return choice_state
+
+    def _accumulate_chunk(self, chunk: ChatCompletionChunk) -> ParsedChatCompletionSnapshot:
+        completion_snapshot = self.__current_completion_snapshot
+
+        if completion_snapshot is None:
+            return _convert_initial_chunk_into_snapshot(chunk)
+
+        for choice in chunk.choices:
+            try:
+                choice_snapshot = completion_snapshot.choices[choice.index]
+                previous_tool_calls = choice_snapshot.message.tool_calls or []
+
+                choice_snapshot.message = cast(
+                    ParsedChatCompletionMessageSnapshot,
+                    construct_type(
+                        type_=ParsedChatCompletionMessageSnapshot,
+                        value=accumulate_delta(
+                            cast(
+                                "dict[object, object]",
+                                model_dump(
+                                    choice_snapshot.message,
+                                    # we don't want to serialise / deserialise our custom properties
+                                    # as they won't appear in the delta and we don't want to have to
+                                    # continuously reparse the content
+                                    exclude={
+                                        "parsed": True,
+                                        "tool_calls": {
+                                            idx: {"function": {"parsed_arguments": True}}
+                                            for idx, _ in enumerate(choice_snapshot.message.tool_calls or [])
+                                        },
+                                    },
+                                ),
+                            ),
+                            cast("dict[object, object]", choice.delta.to_dict()),
+                        ),
+                    ),
+                )
+
+                # ensure tools that have already been parsed are added back into the newly
+                # constructed message snapshot
+                for tool_index, prev_tool in enumerate(previous_tool_calls):
+                    new_tool = (choice_snapshot.message.tool_calls or [])[tool_index]
+
+                    if prev_tool.type == "function":
+                        assert new_tool.type == "function"
+                        new_tool.function.parsed_arguments = prev_tool.function.parsed_arguments
+                    elif TYPE_CHECKING:  # type: ignore[unreachable]
+                        assert_never(prev_tool)
+            except IndexError:
+                choice_snapshot = cast(
+                    ParsedChoiceSnapshot,
+                    construct_type(
+                        type_=ParsedChoiceSnapshot,
+                        value={
+                            **choice.model_dump(exclude_unset=True, exclude={"delta"}),
+                            "message": choice.delta.to_dict(),
+                        },
+                    ),
+                )
+                completion_snapshot.choices.append(choice_snapshot)
+
+            if choice.finish_reason:
+                choice_snapshot.finish_reason = choice.finish_reason
+
+                if has_parseable_input(response_format=self._response_format, input_tools=self._input_tools):
+                    if choice.finish_reason == "length":
+                        raise LengthFinishReasonError()
+
+                    if choice.finish_reason == "content_filter":
+                        raise ContentFilterFinishReasonError()
+
+            if (
+                choice_snapshot.message.content
+                and not choice_snapshot.message.refusal
+                and is_given(self._rich_response_format)
+            ):
+                choice_snapshot.message.parsed = from_json(
+                    bytes(choice_snapshot.message.content, "utf-8"),
+                    partial_mode=True,
+                )
+
+            for tool_call_chunk in choice.delta.tool_calls or []:
+                tool_call_snapshot = (choice_snapshot.message.tool_calls or [])[tool_call_chunk.index]
+
+                if tool_call_snapshot.type == "function":
+                    input_tool = get_input_tool_by_name(
+                        input_tools=self._input_tools, name=tool_call_snapshot.function.name
+                    )
+
+                    if (
+                        input_tool
+                        and input_tool.get("function", {}).get("strict")
+                        and tool_call_snapshot.function.arguments
+                    ):
+                        tool_call_snapshot.function.parsed_arguments = from_json(
+                            bytes(tool_call_snapshot.function.arguments, "utf-8"),
+                            partial_mode=True,
+                        )
+                elif TYPE_CHECKING:  # type: ignore[unreachable]
+                    assert_never(tool_call_snapshot)
+
+            if choice.logprobs is not None:
+                if choice_snapshot.logprobs is None:
+                    choice_snapshot.logprobs = build(
+                        ChoiceLogprobs,
+                        content=choice.logprobs.content,
+                        refusal=choice.logprobs.refusal,
+                    )
+                else:
+                    if choice.logprobs.content:
+                        if choice_snapshot.logprobs.content is None:
+                            choice_snapshot.logprobs.content = []
+
+                        choice_snapshot.logprobs.content.extend(choice.logprobs.content)
+
+                    if choice.logprobs.refusal:
+                        if choice_snapshot.logprobs.refusal is None:
+                            choice_snapshot.logprobs.refusal = []
+
+                        choice_snapshot.logprobs.refusal.extend(choice.logprobs.refusal)
+
+        completion_snapshot.usage = chunk.usage
+        completion_snapshot.system_fingerprint = chunk.system_fingerprint
+
+        return completion_snapshot
+
+    def _build_events(
+        self,
+        *,
+        chunk: ChatCompletionChunk,
+        completion_snapshot: ParsedChatCompletionSnapshot,
+    ) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
+        events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
+
+        events_to_fire.append(
+            build(ChunkEvent, type="chunk", chunk=chunk, snapshot=completion_snapshot),
+        )
+
+        for choice in chunk.choices:
+            choice_state = self._get_choice_state(choice)
+            choice_snapshot = completion_snapshot.choices[choice.index]
+
+            if choice.delta.content is not None and choice_snapshot.message.content is not None:
+                events_to_fire.append(
+                    build(
+                        ContentDeltaEvent,
+                        type="content.delta",
+                        delta=choice.delta.content,
+                        snapshot=choice_snapshot.message.content,
+                        parsed=choice_snapshot.message.parsed,
+                    )
+                )
+
+            if choice.delta.refusal is not None and choice_snapshot.message.refusal is not None:
+                events_to_fire.append(
+                    build(
+                        RefusalDeltaEvent,
+                        type="refusal.delta",
+                        delta=choice.delta.refusal,
+                        snapshot=choice_snapshot.message.refusal,
+                    )
+                )
+
+            if choice.delta.tool_calls:
+                tool_calls = choice_snapshot.message.tool_calls
+                assert tool_calls is not None
+
+                for tool_call_delta in choice.delta.tool_calls:
+                    tool_call = tool_calls[tool_call_delta.index]
+
+                    if tool_call.type == "function":
+                        assert tool_call_delta.function is not None
+                        events_to_fire.append(
+                            build(
+                                FunctionToolCallArgumentsDeltaEvent,
+                                type="tool_calls.function.arguments.delta",
+                                name=tool_call.function.name,
+                                index=tool_call_delta.index,
+                                arguments=tool_call.function.arguments,
+                                parsed_arguments=tool_call.function.parsed_arguments,
+                                arguments_delta=tool_call_delta.function.arguments or "",
+                            )
+                        )
+                    elif TYPE_CHECKING:  # type: ignore[unreachable]
+                        assert_never(tool_call)
+
+            if choice.logprobs is not None and choice_snapshot.logprobs is not None:
+                if choice.logprobs.content and choice_snapshot.logprobs.content:
+                    events_to_fire.append(
+                        build(
+                            LogprobsContentDeltaEvent,
+                            type="logprobs.content.delta",
+                            content=choice.logprobs.content,
+                            snapshot=choice_snapshot.logprobs.content,
+                        ),
+                    )
+
+                if choice.logprobs.refusal and choice_snapshot.logprobs.refusal:
+                    events_to_fire.append(
+                        build(
+                            LogprobsRefusalDeltaEvent,
+                            type="logprobs.refusal.delta",
+                            refusal=choice.logprobs.refusal,
+                            snapshot=choice_snapshot.logprobs.refusal,
+                        ),
+                    )
+
+            events_to_fire.extend(
+                choice_state.get_done_events(
+                    choice_chunk=choice,
+                    choice_snapshot=choice_snapshot,
+                    response_format=self._response_format,
+                )
+            )
+
+        return events_to_fire
+
+
+class ChoiceEventState:
+    def __init__(self, *, input_tools: list[ChatCompletionToolParam]) -> None:
+        self._input_tools = input_tools
+
+        self._content_done = False
+        self._refusal_done = False
+        self._logprobs_content_done = False
+        self._logprobs_refusal_done = False
+        self._done_tool_calls: set[int] = set()
+        self.__current_tool_call_index: int | None = None
+
+    def get_done_events(
+        self,
+        *,
+        choice_chunk: ChoiceChunk,
+        choice_snapshot: ParsedChoiceSnapshot,
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+    ) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
+        events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
+
+        if choice_snapshot.finish_reason:
+            events_to_fire.extend(
+                self._content_done_events(choice_snapshot=choice_snapshot, response_format=response_format)
+            )
+
+            if (
+                self.__current_tool_call_index is not None
+                and self.__current_tool_call_index not in self._done_tool_calls
+            ):
+                self._add_tool_done_event(
+                    events_to_fire=events_to_fire,
+                    choice_snapshot=choice_snapshot,
+                    tool_index=self.__current_tool_call_index,
+                )
+
+        for tool_call in choice_chunk.delta.tool_calls or []:
+            if self.__current_tool_call_index != tool_call.index:
+                events_to_fire.extend(
+                    self._content_done_events(choice_snapshot=choice_snapshot, response_format=response_format)
+                )
+
+                if self.__current_tool_call_index is not None:
+                    self._add_tool_done_event(
+                        events_to_fire=events_to_fire,
+                        choice_snapshot=choice_snapshot,
+                        tool_index=self.__current_tool_call_index,
+                    )
+
+            self.__current_tool_call_index = tool_call.index
+
+        return events_to_fire
+
+    def _content_done_events(
+        self,
+        *,
+        choice_snapshot: ParsedChoiceSnapshot,
+        response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
+    ) -> list[ChatCompletionStreamEvent[ResponseFormatT]]:
+        events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
+
+        if choice_snapshot.message.content and not self._content_done:
+            self._content_done = True
+
+            parsed = maybe_parse_content(
+                response_format=response_format,
+                message=choice_snapshot.message,
+            )
+
+            # update the parsed content to now use the richer `response_format`
+            # as opposed to the raw JSON-parsed object as the content is now
+            # complete and can be fully validated.
+            choice_snapshot.message.parsed = parsed
+
+            events_to_fire.append(
+                build(
+                    # we do this dance so that when the `ContentDoneEvent` instance
+                    # is printed at runtime the class name will include the solved
+                    # type variable, e.g. `ContentDoneEvent[MyModelType]`
+                    cast(  # pyright: ignore[reportUnnecessaryCast]
+                        "type[ContentDoneEvent[ResponseFormatT]]",
+                        cast(Any, ContentDoneEvent)[solve_response_format_t(response_format)],
+                    ),
+                    type="content.done",
+                    content=choice_snapshot.message.content,
+                    parsed=parsed,
+                ),
+            )
+
+        if choice_snapshot.message.refusal is not None and not self._refusal_done:
+            self._refusal_done = True
+            events_to_fire.append(
+                build(RefusalDoneEvent, type="refusal.done", refusal=choice_snapshot.message.refusal),
+            )
+
+        if (
+            choice_snapshot.logprobs is not None
+            and choice_snapshot.logprobs.content is not None
+            and not self._logprobs_content_done
+        ):
+            self._logprobs_content_done = True
+            events_to_fire.append(
+                build(LogprobsContentDoneEvent, type="logprobs.content.done", content=choice_snapshot.logprobs.content),
+            )
+
+        if (
+            choice_snapshot.logprobs is not None
+            and choice_snapshot.logprobs.refusal is not None
+            and not self._logprobs_refusal_done
+        ):
+            self._logprobs_refusal_done = True
+            events_to_fire.append(
+                build(LogprobsRefusalDoneEvent, type="logprobs.refusal.done", refusal=choice_snapshot.logprobs.refusal),
+            )
+
+        return events_to_fire
+
+    def _add_tool_done_event(
+        self,
+        *,
+        events_to_fire: list[ChatCompletionStreamEvent[ResponseFormatT]],
+        choice_snapshot: ParsedChoiceSnapshot,
+        tool_index: int,
+    ) -> None:
+        if tool_index in self._done_tool_calls:
+            return
+
+        self._done_tool_calls.add(tool_index)
+
+        assert choice_snapshot.message.tool_calls is not None
+        tool_call_snapshot = choice_snapshot.message.tool_calls[tool_index]
+
+        if tool_call_snapshot.type == "function":
+            parsed_arguments = parse_function_tool_arguments(
+                input_tools=self._input_tools, function=tool_call_snapshot.function
+            )
+
+            # update the parsed arguments to potentially use a richer type
+            # instead of the raw JSON-parsed object, now that the arguments are
+            # complete and can be fully validated.
+            tool_call_snapshot.function.parsed_arguments = parsed_arguments
+
+            events_to_fire.append(
+                build(
+                    FunctionToolCallArgumentsDoneEvent,
+                    type="tool_calls.function.arguments.done",
+                    index=tool_index,
+                    name=tool_call_snapshot.function.name,
+                    arguments=tool_call_snapshot.function.arguments,
+                    parsed_arguments=parsed_arguments,
+                )
+            )
+        elif TYPE_CHECKING:  # type: ignore[unreachable]
+            assert_never(tool_call_snapshot)
+
+
+def _convert_initial_chunk_into_snapshot(chunk: ChatCompletionChunk) -> ParsedChatCompletionSnapshot:
+    data = chunk.to_dict()
+    choices = cast("list[object]", data["choices"])
+
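+    # re-shape each streamed `delta` into a `message` entry so the snapshot
+    # mirrors the shape of a non-streaming chat completion response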
+    for choice in chunk.choices:
+        choices[choice.index] = {
+            **choice.model_dump(exclude_unset=True, exclude={"delta"}),
+            "message": choice.delta.to_dict(),
+        }
+
+    return cast(
+        ParsedChatCompletionSnapshot,
+        construct_type(
+            type_=ParsedChatCompletionSnapshot,
+            value={
+                "system_fingerprint": None,
+                **data,
+                "object": "chat.completion",
+            },
+        ),
+    )
diff --git a/src/openai/lib/streaming/chat/_events.py b/src/openai/lib/streaming/chat/_events.py
new file mode 100644
index 0000000000..d4c1f28300
--- /dev/null
+++ b/src/openai/lib/streaming/chat/_events.py
@@ -0,0 +1,123 @@
+from typing import List, Union, Generic, Optional
+from typing_extensions import Literal
+
+from ._types import ParsedChatCompletionSnapshot
+from ...._models import BaseModel, GenericModel
+from ..._parsing import ResponseFormatT
+from ....types.chat import ChatCompletionChunk, ChatCompletionTokenLogprob
+
+
+class ChunkEvent(BaseModel):
+    type: Literal["chunk"]
+
+    chunk: ChatCompletionChunk
+
+    snapshot: ParsedChatCompletionSnapshot
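+    """The accumulated `ParsedChatCompletion` snapshot built from every chunk received so far"""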
+
+
+class ContentDeltaEvent(BaseModel):
+    """This event is yielded for every chunk with `choice.delta.content` data."""
+
+    type: Literal["content.delta"]
+
+    delta: str
+
+    snapshot: str
+
+    parsed: Optional[object] = None
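+    """The content parsed so far, accumulated from the received deltas"""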
+
+
+class ContentDoneEvent(GenericModel, Generic[ResponseFormatT]):
+    type: Literal["content.done"]
+
+    content: str
+
+    parsed: Optional[ResponseFormatT] = None
+
+
+class RefusalDeltaEvent(BaseModel):
+    type: Literal["refusal.delta"]
+
+    delta: str
+
+    snapshot: str
+
+
+class RefusalDoneEvent(BaseModel):
+    type: Literal["refusal.done"]
+
+    refusal: str
+
+
+class FunctionToolCallArgumentsDeltaEvent(BaseModel):
+    type: Literal["tool_calls.function.arguments.delta"]
+
+    name: str
+
+    index: int
+
+    arguments: str
+    """Accumulated raw JSON string"""
+
+    parsed_arguments: object
+    """The parsed arguments so far"""
+
+    arguments_delta: str
+    """The JSON string delta"""
+
+
+class FunctionToolCallArgumentsDoneEvent(BaseModel):
+    type: Literal["tool_calls.function.arguments.done"]
+
+    name: str
+
+    index: int
+
+    arguments: str
+    """Accumulated raw JSON string"""
+
+    parsed_arguments: object
+    """The parsed arguments"""
+
+
+class LogprobsContentDeltaEvent(BaseModel):
+    type: Literal["logprobs.content.delta"]
+
+    content: List[ChatCompletionTokenLogprob]
+
+    snapshot: List[ChatCompletionTokenLogprob]
+
+
+class LogprobsContentDoneEvent(BaseModel):
+    type: Literal["logprobs.content.done"]
+
+    content: List[ChatCompletionTokenLogprob]
+
+
+class LogprobsRefusalDeltaEvent(BaseModel):
+    type: Literal["logprobs.refusal.delta"]
+
+    refusal: List[ChatCompletionTokenLogprob]
+
+    snapshot: List[ChatCompletionTokenLogprob]
+
+
+class LogprobsRefusalDoneEvent(BaseModel):
+    type: Literal["logprobs.refusal.done"]
+
+    refusal: List[ChatCompletionTokenLogprob]
+
+
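+# Discriminated union of every event a chat completion stream can yield;
+# consumers can narrow on the `type` literal to handle a specific event.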
+ChatCompletionStreamEvent = Union[
+    ChunkEvent,
+    ContentDeltaEvent,
+    ContentDoneEvent[ResponseFormatT],
+    RefusalDeltaEvent,
+    RefusalDoneEvent,
+    FunctionToolCallArgumentsDeltaEvent,
+    FunctionToolCallArgumentsDoneEvent,
+    LogprobsContentDeltaEvent,
+    LogprobsContentDoneEvent,
+    LogprobsRefusalDeltaEvent,
+    LogprobsRefusalDoneEvent,
+]
diff --git a/src/openai/lib/streaming/chat/_types.py b/src/openai/lib/streaming/chat/_types.py
new file mode 100644
index 0000000000..42552893a0
--- /dev/null
+++ b/src/openai/lib/streaming/chat/_types.py
@@ -0,0 +1,20 @@
+from __future__ import annotations
+
+from typing_extensions import TypeAlias
+
+from ....types.chat import ParsedChoice, ParsedChatCompletion, ParsedChatCompletionMessage
+
+ParsedChatCompletionSnapshot: TypeAlias = ParsedChatCompletion[object]
+"""Snapshot type representing an in-progress accumulation of
+a `ParsedChatCompletion` object.
+"""
+
+ParsedChatCompletionMessageSnapshot: TypeAlias = ParsedChatCompletionMessage[object]
+"""Snapshot type representing an in-progress accumulation of
+a `ParsedChatCompletionMessage` object.
+
+If the content has been fully accumulated, the `.parsed` content will be
+the `response_format` instance; otherwise it will be the raw JSON-parsed version.
+"""
+
+ParsedChoiceSnapshot: TypeAlias = ParsedChoice[object]
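+"""Snapshot type representing an in-progress accumulation of
+a `ParsedChoice` object.
+"""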
diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py
index b4dc3cfdd6..441390d24b 100644
--- a/src/openai/resources/beta/assistants.py
+++ b/src/openai/resources/beta/assistants.py
@@ -88,6 +88,11 @@ def create(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -233,6 +238,11 @@ def update(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -453,6 +463,11 @@ async def create(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -598,6 +613,11 @@ async def update(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
diff --git a/src/openai/resources/beta/beta.py b/src/openai/resources/beta/beta.py
index 0d9806678f..479c97c471 100644
--- a/src/openai/resources/beta/beta.py
+++ b/src/openai/resources/beta/beta.py
@@ -11,6 +11,7 @@
     AsyncThreadsWithStreamingResponse,
 )
 from ..._compat import cached_property
+from .chat.chat import Chat, AsyncChat
 from .assistants import (
     Assistants,
     AsyncAssistants,
@@ -35,6 +36,10 @@
 
 
 class Beta(SyncAPIResource):
+    @cached_property
+    def chat(self) -> Chat:
+        return Chat(self._client)
+
     @cached_property
     def vector_stores(self) -> VectorStores:
         return VectorStores(self._client)
@@ -57,6 +62,10 @@ def with_streaming_response(self) -> BetaWithStreamingResponse:
 
 
 class AsyncBeta(AsyncAPIResource):
+    @cached_property
+    def chat(self) -> AsyncChat:
+        return AsyncChat(self._client)
+
     @cached_property
     def vector_stores(self) -> AsyncVectorStores:
         return AsyncVectorStores(self._client)
diff --git a/src/openai/resources/beta/chat/__init__.py b/src/openai/resources/beta/chat/__init__.py
new file mode 100644
index 0000000000..072d7867a5
--- /dev/null
+++ b/src/openai/resources/beta/chat/__init__.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .chat import Chat, AsyncChat
+from .completions import Completions, AsyncCompletions
+
+__all__ = [
+    "Completions",
+    "AsyncCompletions",
+    "Chat",
+    "AsyncChat",
+]
diff --git a/src/openai/resources/beta/chat/chat.py b/src/openai/resources/beta/chat/chat.py
new file mode 100644
index 0000000000..6afdcea381
--- /dev/null
+++ b/src/openai/resources/beta/chat/chat.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ...._compat import cached_property
+from .completions import Completions, AsyncCompletions
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["Chat", "AsyncChat"]
+
+
+class Chat(SyncAPIResource):
+    @cached_property
+    def completions(self) -> Completions:
+        return Completions(self._client)
+
+
+class AsyncChat(AsyncAPIResource):
+    @cached_property
+    def completions(self) -> AsyncCompletions:
+        return AsyncCompletions(self._client)
diff --git a/src/openai/resources/beta/chat/completions.py b/src/openai/resources/beta/chat/completions.py
new file mode 100644
index 0000000000..88ea2c0572
--- /dev/null
+++ b/src/openai/resources/beta/chat/completions.py
@@ -0,0 +1,449 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from functools import partial
+from typing_extensions import Literal
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._streaming import Stream
+from ....types.chat import completion_create_params
+from ....lib._parsing import (
+    ResponseFormatT,
+    validate_input_tools as _validate_input_tools,
+    parse_chat_completion as _parse_chat_completion,
+    type_to_response_format_param as _type_to_response_format,
+)
+from ....types.chat_model import ChatModel
+from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
+from ....types.chat.chat_completion_chunk import ChatCompletionChunk
+from ....types.chat.parsed_chat_completion import ParsedChatCompletion
+from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
+from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
+from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
+
+__all__ = ["Completions", "AsyncCompletions"]
+
+
+class Completions(SyncAPIResource):
+    def parse(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedChatCompletion[ResponseFormatT]:
+        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
+        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
+
+        You can pass a pydantic model to this method and it will automatically convert the model
+        into a JSON schema, send it to the API and parse the response content back into the given model.
+
+        This method will also automatically parse `function` tool calls if:
+        - You use the `openai.pydantic_function_tool()` helper method
+        - You mark your tool schema with `"strict": True`
+
+        Example usage:
+        ```py
+        from typing import List
+
+        from pydantic import BaseModel
+        from openai import OpenAI
+
+        class Step(BaseModel):
+            explanation: str
+            output: str
+
+        class MathResponse(BaseModel):
+            steps: List[Step]
+            final_answer: str
+
+        client = OpenAI()
+        completion = client.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {"role": "system", "content": "You are a helpful math tutor."},
+                {"role": "user", "content": "solve 8x + 31 = 2"},
+            ],
+            response_format=MathResponse,
+        )
+
+        message = completion.choices[0].message
+        if message.parsed:
+            print(message.parsed.steps)
+            print("answer: ", message.parsed.final_answer)
+        ```
+        """
+        _validate_input_tools(tools)
+
+        extra_headers = {
+            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
+            **(extra_headers or {}),
+        }
+
+        raw_completion = self._client.chat.completions.create(
+            messages=messages,
+            model=model,
+            response_format=_type_to_response_format(response_format),
+            frequency_penalty=frequency_penalty,
+            function_call=function_call,
+            functions=functions,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_tokens=max_tokens,
+            n=n,
+            parallel_tool_calls=parallel_tool_calls,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            service_tier=service_tier,
+            stop=stop,
+            stream_options=stream_options,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            tools=tools,
+            top_logprobs=top_logprobs,
+            top_p=top_p,
+            user=user,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+        return _parse_chat_completion(
+            response_format=response_format,
+            chat_completion=raw_completion,
+            input_tools=tools,
+        )
+
+    def stream(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ChatCompletionStreamManager[ResponseFormatT]:
+        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
+        and automatic accumulation of each delta.
+
+        This also supports all of the parsing utilities that `.parse()` does.
+
+        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
+
+        ```py
+        with client.beta.chat.completions.stream(
+            model='gpt-4o-2024-08-06',
+            messages=[...],
+        ) as stream:
+            for event in stream:
+                if event.type == 'content.delta':
+                    print(event.delta, flush=True, end='')
+        ```
+
+        When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an iterator. The full list of events yielded by the iterator is outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
+
+        When the context manager exits, the response will be closed; however, the `stream` instance is still available outside
+        the context manager.
+        """
+        _validate_input_tools(tools)
+
+        extra_headers = {
+            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
+            **(extra_headers or {}),
+        }
+
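+        # defer the underlying API request; the stream manager invokes it lazily
+        # when the context manager is entered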
+        api_request: partial[Stream[ChatCompletionChunk]] = partial(
+            self._client.chat.completions.create,
+            messages=messages,
+            model=model,
+            stream=True,
+            response_format=_type_to_response_format(response_format),
+            frequency_penalty=frequency_penalty,
+            function_call=function_call,
+            functions=functions,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_tokens=max_tokens,
+            n=n,
+            parallel_tool_calls=parallel_tool_calls,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            service_tier=service_tier,
+            stop=stop,
+            stream_options=stream_options,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            tools=tools,
+            top_logprobs=top_logprobs,
+            top_p=top_p,
+            user=user,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+        return ChatCompletionStreamManager(
+            api_request,
+            response_format=response_format,
+            input_tools=tools,
+        )
+
+
+class AsyncCompletions(AsyncAPIResource):
+    async def parse(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedChatCompletion[ResponseFormatT]:
+        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
+        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
+
+        You can pass a pydantic model to this method and it will automatically convert the model
+        into a JSON schema, send it to the API and parse the response content back into the given model.
+
+        This method will also automatically parse `function` tool calls if:
+        - You use the `openai.pydantic_function_tool()` helper method
+        - You mark your tool schema with `"strict": True`
+
+        Example usage:
+        ```py
+        from typing import List
+
+        from pydantic import BaseModel
+        from openai import AsyncOpenAI
+
+        class Step(BaseModel):
+            explanation: str
+            output: str
+
+        class MathResponse(BaseModel):
+            steps: List[Step]
+            final_answer: str
+
+        client = AsyncOpenAI()
+        completion = await client.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {"role": "system", "content": "You are a helpful math tutor."},
+                {"role": "user", "content": "solve 8x + 31 = 2"},
+            ],
+            response_format=MathResponse,
+        )
+
+        message = completion.choices[0].message
+        if message.parsed:
+            print(message.parsed.steps)
+            print("answer: ", message.parsed.final_answer)
+        ```
+        """
+        _validate_input_tools(tools)
+
+        extra_headers = {
+            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
+            **(extra_headers or {}),
+        }
+
+        raw_completion = await self._client.chat.completions.create(
+            messages=messages,
+            model=model,
+            response_format=_type_to_response_format(response_format),
+            frequency_penalty=frequency_penalty,
+            function_call=function_call,
+            functions=functions,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_tokens=max_tokens,
+            n=n,
+            parallel_tool_calls=parallel_tool_calls,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            service_tier=service_tier,
+            stop=stop,
+            stream_options=stream_options,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            tools=tools,
+            top_logprobs=top_logprobs,
+            top_p=top_p,
+            user=user,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+        return _parse_chat_completion(
+            response_format=response_format,
+            chat_completion=raw_completion,
+            input_tools=tools,
+        )
+
+    def stream(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
+        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
+        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
+        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
+        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
+        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
+        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
+        seed: Optional[int] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
+        temperature: Optional[float] | NotGiven = NOT_GIVEN,
+        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
+        top_p: Optional[float] | NotGiven = NOT_GIVEN,
+        user: str | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
+        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
+        and automatic accumulation of each delta.
+
+        This also supports all of the parsing utilities that `.parse()` does.
+
+        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
+
+        ```py
+        async with client.beta.chat.completions.stream(
+            model='gpt-4o-2024-08-06',
+            messages=[...],
+        ) as stream:
+            async for event in stream:
+                if event.type == 'content.delta':
+                    print(event.delta, flush=True, end='')
+        ```
+
+        When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an async iterator. The full list of events yielded by the iterator is outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
+
+        When the context manager exits, the response will be closed; however, the `stream` instance is still available outside
+        the context manager.
+        """
+        _validate_input_tools(tools)
+
+        extra_headers = {
+            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
+            **(extra_headers or {}),
+        }
+
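+        # calling the async `create` method returns an awaitable; the stream manager
+        # awaits it when the async context manager is entered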
+        api_request = self._client.chat.completions.create(
+            messages=messages,
+            model=model,
+            stream=True,
+            response_format=_type_to_response_format(response_format),
+            frequency_penalty=frequency_penalty,
+            function_call=function_call,
+            functions=functions,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_tokens=max_tokens,
+            n=n,
+            parallel_tool_calls=parallel_tool_calls,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            service_tier=service_tier,
+            stop=stop,
+            stream_options=stream_options,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            tools=tools,
+            top_logprobs=top_logprobs,
+            top_p=top_p,
+            user=user,
+            extra_headers=extra_headers,
+            extra_query=extra_query,
+            extra_body=extra_body,
+            timeout=timeout,
+        )
+        return AsyncChatCompletionStreamManager(
+            api_request,
+            response_format=response_format,
+            input_tools=tools,
+        )
diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py
index 23a09d30ce..cbfb9546f0 100644
--- a/src/openai/resources/beta/threads/runs/runs.py
+++ b/src/openai/resources/beta/threads/runs/runs.py
@@ -145,6 +145,11 @@ def create(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -275,6 +280,11 @@ def create(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -401,6 +411,11 @@ def create(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -1443,6 +1458,11 @@ async def create(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -1573,6 +1593,11 @@ async def create(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -1699,6 +1724,11 @@ async def create(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index f40e164180..4c95c484cc 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -323,6 +323,11 @@ def create_and_run(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -452,6 +457,11 @@ def create_and_run(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -577,6 +587,11 @@ def create_and_run(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -1131,6 +1146,11 @@ async def create_and_run(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -1260,6 +1280,11 @@ async def create_and_run(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
@@ -1385,6 +1410,11 @@ async def create_and_run(
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
               and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which guarantees the model will match your supplied JSON schema. Learn
+              more in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
               Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
               message the model generates is valid JSON.
 
diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py
index 88892d1d64..3dcd3774d7 100644
--- a/src/openai/resources/chat/completions.py
+++ b/src/openai/resources/chat/completions.py
@@ -19,9 +19,7 @@
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
 from ..._streaming import Stream, AsyncStream
 from ...types.chat import completion_create_params
-from ..._base_client import (
-    make_request_options,
-)
+from ..._base_client import make_request_options
 from ...types.chat_model import ChatModel
 from ...types.chat.chat_completion import ChatCompletion
 from ...types.chat.chat_completion_chunk import ChatCompletionChunk
@@ -144,6 +142,8 @@ def create(
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
 
           response_format: An object specifying the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+              [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
               all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
 
@@ -340,6 +340,8 @@ def create(
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
 
           response_format: An object specifying the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+              [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
               all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
 
@@ -529,6 +531,8 @@ def create(
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
 
           response_format: An object specifying the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+              [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
               all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
 
@@ -793,6 +797,8 @@ async def create(
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
 
           response_format: An object specifying the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+              [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
               all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
 
@@ -989,6 +995,8 @@ async def create(
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
 
           response_format: An object specifying the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+              [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
               all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
 
@@ -1178,6 +1186,8 @@ async def create(
               [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
 
           response_format: An object specifying the format that the model must output. Compatible with
+              [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+              [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
               all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
 
diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py
index 14b384a88d..5cef7bcd22 100644
--- a/src/openai/resources/fine_tuning/jobs/jobs.py
+++ b/src/openai/resources/fine_tuning/jobs/jobs.py
@@ -52,7 +52,7 @@ def with_streaming_response(self) -> JobsWithStreamingResponse:
     def create(
         self,
         *,
-        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]],
+        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
         training_file: str,
         hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
         integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
@@ -77,7 +77,7 @@ def create(
 
         Args:
           model: The name of the model to fine-tune. You can select one of the
-              [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+              [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
 
           training_file: The ID of an uploaded file that contains training data.
 
@@ -107,7 +107,7 @@ def create(
               name.
 
               For example, a `suffix` of "custom-model-name" would produce a model name like
-              `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
+              `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
 
           validation_file: The ID of an uploaded file that contains validation data.
 
@@ -332,7 +332,7 @@ def with_streaming_response(self) -> AsyncJobsWithStreamingResponse:
     async def create(
         self,
         *,
-        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]],
+        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
         training_file: str,
         hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
         integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
@@ -357,7 +357,7 @@ async def create(
 
         Args:
           model: The name of the model to fine-tune. You can select one of the
-              [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+              [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
 
           training_file: The ID of an uploaded file that contains training data.
 
@@ -387,7 +387,7 @@ async def create(
               name.
 
               For example, a `suffix` of "custom-model-name" would produce a model name like
-              `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
+              `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
 
           validation_file: The ID of an uploaded file that contains validation data.
 
diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py
index 84916962cc..f621fb67c5 100644
--- a/src/openai/types/__init__.py
+++ b/src/openai/types/__init__.py
@@ -9,6 +9,9 @@
     ErrorObject as ErrorObject,
     FunctionDefinition as FunctionDefinition,
     FunctionParameters as FunctionParameters,
+    ResponseFormatText as ResponseFormatText,
+    ResponseFormatJSONObject as ResponseFormatJSONObject,
+    ResponseFormatJSONSchema as ResponseFormatJSONSchema,
 )
 from .upload import Upload as Upload
 from .embedding import Embedding as Embedding
diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py
index d851a3619c..9c5ddfdbe0 100644
--- a/src/openai/types/beta/__init__.py
+++ b/src/openai/types/beta/__init__.py
@@ -23,7 +23,6 @@
 from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
 from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
 from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
-from .assistant_response_format import AssistantResponseFormat as AssistantResponseFormat
 from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
 from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
 from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam
@@ -31,7 +30,6 @@
 from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption
 from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams
 from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction
-from .assistant_response_format_param import AssistantResponseFormatParam as AssistantResponseFormatParam
 from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption
 from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam
 from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam
diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py
index 4e5adc766e..c6a0a4cfcf 100644
--- a/src/openai/types/beta/assistant.py
+++ b/src/openai/types/beta/assistant.py
@@ -89,6 +89,11 @@ class Assistant(BaseModel):
     [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
     and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which guarantees the model will match your supplied JSON schema. Learn
+    more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
     Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
     message the model generates is valid JSON.
 
diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py
index c10f7f57ad..84cd4425d1 100644
--- a/src/openai/types/beta/assistant_create_params.py
+++ b/src/openai/types/beta/assistant_create_params.py
@@ -60,6 +60,11 @@ class AssistantCreateParams(TypedDict, total=False):
     [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
     and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which guarantees the model will match your supplied JSON schema. Learn
+    more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
     Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
     message the model generates is valid JSON.
 
diff --git a/src/openai/types/beta/assistant_response_format.py b/src/openai/types/beta/assistant_response_format.py
deleted file mode 100644
index f53bdaf62a..0000000000
--- a/src/openai/types/beta/assistant_response_format.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["AssistantResponseFormat"]
-
-
-class AssistantResponseFormat(BaseModel):
-    type: Optional[Literal["text", "json_object"]] = None
-    """Must be one of `text` or `json_object`."""
diff --git a/src/openai/types/beta/assistant_response_format_option.py b/src/openai/types/beta/assistant_response_format_option.py
index 6ce390f6d6..6f06a3442f 100644
--- a/src/openai/types/beta/assistant_response_format_option.py
+++ b/src/openai/types/beta/assistant_response_format_option.py
@@ -3,8 +3,12 @@
 from typing import Union
 from typing_extensions import Literal, TypeAlias
 
-from .assistant_response_format import AssistantResponseFormat
+from ..shared.response_format_text import ResponseFormatText
+from ..shared.response_format_json_object import ResponseFormatJSONObject
+from ..shared.response_format_json_schema import ResponseFormatJSONSchema
 
 __all__ = ["AssistantResponseFormatOption"]
 
-AssistantResponseFormatOption: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormat]
+AssistantResponseFormatOption: TypeAlias = Union[
+    Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
+]
diff --git a/src/openai/types/beta/assistant_response_format_option_param.py b/src/openai/types/beta/assistant_response_format_option_param.py
index 8100088723..680a060c3c 100644
--- a/src/openai/types/beta/assistant_response_format_option_param.py
+++ b/src/openai/types/beta/assistant_response_format_option_param.py
@@ -5,8 +5,13 @@
 from typing import Union
 from typing_extensions import Literal, TypeAlias
 
-from .assistant_response_format_param import AssistantResponseFormatParam
+from ...types import shared_params
 
 __all__ = ["AssistantResponseFormatOptionParam"]
 
-AssistantResponseFormatOptionParam: TypeAlias = Union[Literal["none", "auto"], AssistantResponseFormatParam]
+AssistantResponseFormatOptionParam: TypeAlias = Union[
+    Literal["auto"],
+    shared_params.ResponseFormatText,
+    shared_params.ResponseFormatJSONObject,
+    shared_params.ResponseFormatJSONSchema,
+]
diff --git a/src/openai/types/beta/assistant_response_format_param.py b/src/openai/types/beta/assistant_response_format_param.py
deleted file mode 100644
index 96e1d02115..0000000000
--- a/src/openai/types/beta/assistant_response_format_param.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["AssistantResponseFormatParam"]
-
-
-class AssistantResponseFormatParam(TypedDict, total=False):
-    type: Literal["text", "json_object"]
-    """Must be one of `text` or `json_object`."""
diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py
index b401e1a891..ade565819f 100644
--- a/src/openai/types/beta/assistant_update_params.py
+++ b/src/openai/types/beta/assistant_update_params.py
@@ -49,6 +49,11 @@ class AssistantUpdateParams(TypedDict, total=False):
     [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
     and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which guarantees the model will match your supplied JSON schema. Learn
+    more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
     Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
     message the model generates is valid JSON.
 
diff --git a/src/openai/types/beta/file_search_tool.py b/src/openai/types/beta/file_search_tool.py
index e2711b9b3d..26ab1cb83f 100644
--- a/src/openai/types/beta/file_search_tool.py
+++ b/src/openai/types/beta/file_search_tool.py
@@ -12,8 +12,8 @@ class FileSearch(BaseModel):
     max_num_results: Optional[int] = None
     """The maximum number of results the file search tool should output.
 
-    The default is 20 for gpt-4\\** models and 5 for gpt-3.5-turbo. This number should
-    be between 1 and 50 inclusive.
+    The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
+    should be between 1 and 50 inclusive.
 
     Note that the file search tool may output fewer than `max_num_results` results.
     See the
diff --git a/src/openai/types/beta/file_search_tool_param.py b/src/openai/types/beta/file_search_tool_param.py
index 115f86a444..666719f8cd 100644
--- a/src/openai/types/beta/file_search_tool_param.py
+++ b/src/openai/types/beta/file_search_tool_param.py
@@ -11,8 +11,8 @@ class FileSearch(TypedDict, total=False):
     max_num_results: int
     """The maximum number of results the file search tool should output.
 
-    The default is 20 for gpt-4\\** models and 5 for gpt-3.5-turbo. This number should
-    be between 1 and 50 inclusive.
+    The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
+    should be between 1 and 50 inclusive.
 
     Note that the file search tool may output fewer than `max_num_results` results.
     See the
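
`max_num_results` is set inside the `file_search` tool definition itself. A short sketch, assuming a vector store is attached to the assistant elsewhere:

from openai import OpenAI

client = OpenAI()

assistant = client.beta.assistants.create(
    model="gpt-4o",
    tools=[
        {
            "type": "file_search",
            # defaults to 20 for gpt-4* models and 5 for gpt-3.5-turbo
            "file_search": {"max_num_results": 5},
        }
    ],
)
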
diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py
index 62cff921e2..7490b25ef3 100644
--- a/src/openai/types/beta/thread_create_and_run_params.py
+++ b/src/openai/types/beta/thread_create_and_run_params.py
@@ -100,6 +100,11 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
     [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
     and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which guarantees the model will match your supplied JSON schema. Learn
+    more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
     Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
     message the model generates is valid JSON.
 
diff --git a/src/openai/types/beta/threads/__init__.py b/src/openai/types/beta/threads/__init__.py
index 023d76fc13..70853177bd 100644
--- a/src/openai/types/beta/threads/__init__.py
+++ b/src/openai/types/beta/threads/__init__.py
@@ -25,11 +25,13 @@
 from .text_content_block import TextContentBlock as TextContentBlock
 from .message_delta_event import MessageDeltaEvent as MessageDeltaEvent
 from .message_list_params import MessageListParams as MessageListParams
+from .refusal_delta_block import RefusalDeltaBlock as RefusalDeltaBlock
 from .file_path_annotation import FilePathAnnotation as FilePathAnnotation
 from .image_url_delta_block import ImageURLDeltaBlock as ImageURLDeltaBlock
 from .message_content_delta import MessageContentDelta as MessageContentDelta
 from .message_create_params import MessageCreateParams as MessageCreateParams
 from .message_update_params import MessageUpdateParams as MessageUpdateParams
+from .refusal_content_block import RefusalContentBlock as RefusalContentBlock
 from .image_file_delta_block import ImageFileDeltaBlock as ImageFileDeltaBlock
 from .image_url_content_block import ImageURLContentBlock as ImageURLContentBlock
 from .file_citation_annotation import FileCitationAnnotation as FileCitationAnnotation
diff --git a/src/openai/types/beta/threads/message_content.py b/src/openai/types/beta/threads/message_content.py
index 7b718c3ca9..9523c1e1b9 100644
--- a/src/openai/types/beta/threads/message_content.py
+++ b/src/openai/types/beta/threads/message_content.py
@@ -5,11 +5,14 @@
 
 from ...._utils import PropertyInfo
 from .text_content_block import TextContentBlock
+from .refusal_content_block import RefusalContentBlock
 from .image_url_content_block import ImageURLContentBlock
 from .image_file_content_block import ImageFileContentBlock
 
 __all__ = ["MessageContent"]
 
+
 MessageContent: TypeAlias = Annotated[
-    Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock], PropertyInfo(discriminator="type")
+    Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock, RefusalContentBlock],
+    PropertyInfo(discriminator="type"),
 ]
diff --git a/src/openai/types/beta/threads/message_content_delta.py b/src/openai/types/beta/threads/message_content_delta.py
index 667172c08f..b6e7dfa45a 100644
--- a/src/openai/types/beta/threads/message_content_delta.py
+++ b/src/openai/types/beta/threads/message_content_delta.py
@@ -5,11 +5,13 @@
 
 from ...._utils import PropertyInfo
 from .text_delta_block import TextDeltaBlock
+from .refusal_delta_block import RefusalDeltaBlock
 from .image_url_delta_block import ImageURLDeltaBlock
 from .image_file_delta_block import ImageFileDeltaBlock
 
 __all__ = ["MessageContentDelta"]
 
 MessageContentDelta: TypeAlias = Annotated[
-    Union[ImageFileDeltaBlock, TextDeltaBlock, ImageURLDeltaBlock], PropertyInfo(discriminator="type")
+    Union[ImageFileDeltaBlock, TextDeltaBlock, RefusalDeltaBlock, ImageURLDeltaBlock],
+    PropertyInfo(discriminator="type"),
 ]
diff --git a/src/openai/types/beta/threads/refusal_content_block.py b/src/openai/types/beta/threads/refusal_content_block.py
new file mode 100644
index 0000000000..d54f948554
--- /dev/null
+++ b/src/openai/types/beta/threads/refusal_content_block.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RefusalContentBlock"]
+
+
+class RefusalContentBlock(BaseModel):
+    refusal: str
+
+    type: Literal["refusal"]
+    """Always `refusal`."""
diff --git a/src/openai/types/beta/threads/refusal_delta_block.py b/src/openai/types/beta/threads/refusal_delta_block.py
new file mode 100644
index 0000000000..dbd8e62697
--- /dev/null
+++ b/src/openai/types/beta/threads/refusal_delta_block.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["RefusalDeltaBlock"]
+
+
+class RefusalDeltaBlock(BaseModel):
+    index: int
+    """The index of the refusal part in the message."""
+
+    type: Literal["refusal"]
+    """Always `refusal`."""
+
+    refusal: Optional[str] = None
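
With these two new block types, Assistants message content can carry refusals alongside text. A minimal sketch of handling them when reading a thread (the thread ID is a placeholder):

from openai import OpenAI

client = OpenAI()

messages = client.beta.threads.messages.list(thread_id="thread_placeholder")
for message in messages:
    for block in message.content:
        if block.type == "refusal":
            print("refused:", block.refusal)
        elif block.type == "text":
            print(block.text.value)
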
diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py
index 81d10d4a56..0579e229d8 100644
--- a/src/openai/types/beta/threads/run.py
+++ b/src/openai/types/beta/threads/run.py
@@ -171,6 +171,11 @@ class Run(BaseModel):
     [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
     and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which guarantees the model will match your supplied JSON schema. Learn
+    more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
     Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
     message the model generates is valid JSON.
 
diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py
index e0c42fd23f..d3e6d9c476 100644
--- a/src/openai/types/beta/threads/run_create_params.py
+++ b/src/openai/types/beta/threads/run_create_params.py
@@ -97,6 +97,11 @@ class RunCreateParamsBase(TypedDict, total=False):
     [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
     and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
 
+    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+    Outputs which guarantees the model will match your supplied JSON schema. Learn
+    more in the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
     Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
     message the model generates is valid JSON.
 
diff --git a/src/openai/types/beta/vector_stores/vector_store_file.py b/src/openai/types/beta/vector_stores/vector_store_file.py
index 4762de0ebd..65096e8dad 100644
--- a/src/openai/types/beta/vector_stores/vector_store_file.py
+++ b/src/openai/types/beta/vector_stores/vector_store_file.py
@@ -17,7 +17,7 @@
 
 
 class LastError(BaseModel):
-    code: Literal["internal_error", "file_not_found", "parsing_error", "unhandled_mime_type"]
+    code: Literal["server_error", "unsupported_file", "invalid_file"]
     """One of `server_error` or `rate_limit_exceeded`."""
 
     message: str
diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py
index 0ba812ff9b..a5cf3734b8 100644
--- a/src/openai/types/chat/__init__.py
+++ b/src/openai/types/chat/__init__.py
@@ -5,8 +5,17 @@
 from .chat_completion import ChatCompletion as ChatCompletion
 from .chat_completion_role import ChatCompletionRole as ChatCompletionRole
 from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
+from .parsed_chat_completion import (
+    ParsedChoice as ParsedChoice,
+    ParsedChatCompletion as ParsedChatCompletion,
+    ParsedChatCompletionMessage as ParsedChatCompletionMessage,
+)
 from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage
 from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .parsed_function_tool_call import (
+    ParsedFunction as ParsedFunction,
+    ParsedFunctionToolCall as ParsedFunctionToolCall,
+)
 from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam
 from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
 from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
@@ -37,6 +46,9 @@
 from .chat_completion_tool_choice_option_param import (
     ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam,
 )
+from .chat_completion_content_part_refusal_param import (
+    ChatCompletionContentPartRefusalParam as ChatCompletionContentPartRefusalParam,
+)
 from .chat_completion_function_call_option_param import (
     ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam,
 )
diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py
index 5f4eaf3366..4b53e70890 100644
--- a/src/openai/types/chat/chat_completion.py
+++ b/src/openai/types/chat/chat_completion.py
@@ -15,6 +15,9 @@ class ChoiceLogprobs(BaseModel):
     content: Optional[List[ChatCompletionTokenLogprob]] = None
     """A list of message content tokens with log probability information."""
 
+    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message refusal tokens with log probability information."""
+
 
 class Choice(BaseModel):
     finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py
index 8f7357b96c..2429d41d33 100644
--- a/src/openai/types/chat/chat_completion_assistant_message_param.py
+++ b/src/openai/types/chat/chat_completion_assistant_message_param.py
@@ -2,12 +2,16 @@
 
 from __future__ import annotations
 
-from typing import Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
 from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam
+from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam
 
-__all__ = ["ChatCompletionAssistantMessageParam", "FunctionCall"]
+__all__ = ["ChatCompletionAssistantMessageParam", "ContentArrayOfContentPart", "FunctionCall"]
+
+ContentArrayOfContentPart: TypeAlias = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam]
 
 
 class FunctionCall(TypedDict, total=False):
@@ -27,7 +31,7 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False):
     role: Required[Literal["assistant"]]
     """The role of the messages author, in this case `assistant`."""
 
-    content: Optional[str]
+    content: Union[str, Iterable[ContentArrayOfContentPart], None]
     """The contents of the assistant message.
 
     Required unless `tool_calls` or `function_call` is specified.
@@ -47,5 +51,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False):
     role.
     """
 
+    refusal: Optional[str]
+    """The refusal message by the assistant."""
+
     tool_calls: Iterable[ChatCompletionMessageToolCallParam]
     """The tool calls generated by the model, such as function calls."""
diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py
index 65643c7e60..9ec6dc4bdb 100644
--- a/src/openai/types/chat/chat_completion_chunk.py
+++ b/src/openai/types/chat/chat_completion_chunk.py
@@ -67,6 +67,9 @@ class ChoiceDelta(BaseModel):
     model.
     """
 
+    refusal: Optional[str] = None
+    """The refusal message generated by the model."""
+
     role: Optional[Literal["system", "user", "assistant", "tool"]] = None
     """The role of the author of this message."""
 
@@ -77,6 +80,9 @@ class ChoiceLogprobs(BaseModel):
     content: Optional[List[ChatCompletionTokenLogprob]] = None
     """A list of message content tokens with log probability information."""
 
+    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message refusal tokens with log probability information."""
+
 
 class Choice(BaseModel):
     delta: ChoiceDelta
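
When streaming, a refusal arrives on the delta just like regular content. A short sketch of watching for it:

from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for chunk in stream:
    if not chunk.choices:
        continue
    delta = chunk.choices[0].delta
    if delta.refusal:
        print(delta.refusal, end="", flush=True)
    elif delta.content:
        print(delta.content, end="", flush=True)
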
diff --git a/src/openai/types/chat/chat_completion_content_part_refusal_param.py b/src/openai/types/chat/chat_completion_content_part_refusal_param.py
new file mode 100644
index 0000000000..c18c7db770
--- /dev/null
+++ b/src/openai/types/chat/chat_completion_content_part_refusal_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartRefusalParam"]
+
+
+class ChatCompletionContentPartRefusalParam(TypedDict, total=False):
+    refusal: Required[str]
+    """The refusal message generated by the model."""
+
+    type: Required[Literal["refusal"]]
+    """The type of the content part."""
diff --git a/src/openai/types/chat/chat_completion_message.py b/src/openai/types/chat/chat_completion_message.py
index 8db7d17d24..492bb68c85 100644
--- a/src/openai/types/chat/chat_completion_message.py
+++ b/src/openai/types/chat/chat_completion_message.py
@@ -26,6 +26,9 @@ class ChatCompletionMessage(BaseModel):
     content: Optional[str] = None
     """The contents of the message."""
 
+    refusal: Optional[str] = None
+    """The refusal message generated by the model."""
+
     role: Literal["assistant"]
     """The role of the author of this message."""
 
diff --git a/src/openai/types/chat/chat_completion_system_message_param.py b/src/openai/types/chat/chat_completion_system_message_param.py
index 94bb3f636c..172ccea09e 100644
--- a/src/openai/types/chat/chat_completion_system_message_param.py
+++ b/src/openai/types/chat/chat_completion_system_message_param.py
@@ -2,13 +2,16 @@
 
 from __future__ import annotations
 
+from typing import Union, Iterable
 from typing_extensions import Literal, Required, TypedDict
 
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
 __all__ = ["ChatCompletionSystemMessageParam"]
 
 
 class ChatCompletionSystemMessageParam(TypedDict, total=False):
-    content: Required[str]
+    content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
     """The contents of the system message."""
 
     role: Required[Literal["system"]]
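
System (and tool) message content now also accepts a list of text parts instead of a single string. A minimal example:

from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[
        {
            "role": "system",
            "content": [
                {"type": "text", "text": "You are a terse assistant."},
                {"type": "text", "text": "Answer in one sentence."},
            ],
        },
        {"role": "user", "content": "What is a monad?"},
    ],
)
print(completion.choices[0].message.content)
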
diff --git a/src/openai/types/chat/chat_completion_tool_message_param.py b/src/openai/types/chat/chat_completion_tool_message_param.py
index 5c590e033f..eb5e270e47 100644
--- a/src/openai/types/chat/chat_completion_tool_message_param.py
+++ b/src/openai/types/chat/chat_completion_tool_message_param.py
@@ -2,13 +2,16 @@
 
 from __future__ import annotations
 
+from typing import Union, Iterable
 from typing_extensions import Literal, Required, TypedDict
 
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
 __all__ = ["ChatCompletionToolMessageParam"]
 
 
 class ChatCompletionToolMessageParam(TypedDict, total=False):
-    content: Required[str]
+    content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
     """The contents of the tool message."""
 
     role: Required[Literal["tool"]]
diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py
index 9e81881b9e..bf648a3858 100644
--- a/src/openai/types/chat/completion_create_params.py
+++ b/src/openai/types/chat/completion_create_params.py
@@ -121,7 +121,8 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     response_format: ResponseFormat
     """An object specifying the format that the model must output.
 
-    Compatible with
+    Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
+    [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
     [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
     all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
 
@@ -250,9 +251,9 @@ class Function(TypedDict, total=False):
     """
 
 
-class ResponseFormat(TypedDict, total=False):
-    type: Literal["text", "json_object"]
-    """Must be one of `text` or `json_object`."""
+ResponseFormat: TypeAlias = Union[
+    shared_params.ResponseFormatText, shared_params.ResponseFormatJSONObject, shared_params.ResponseFormatJSONSchema
+]
 
 
 class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase):
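
`response_format` on chat completions now takes the shared response-format shapes, including `json_schema`. A hedged sketch with an illustrative schema:

import json

from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "Alice is 30 years old."}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "person",
            "schema": {
                "type": "object",
                "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
                "required": ["name", "age"],
                "additionalProperties": False,
            },
            "strict": True,
        },
    },
)
message = completion.choices[0].message
if message.refusal:
    print(message.refusal)
else:
    print(json.loads(message.content))
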
diff --git a/src/openai/types/chat/parsed_chat_completion.py b/src/openai/types/chat/parsed_chat_completion.py
new file mode 100644
index 0000000000..4b11dac5a0
--- /dev/null
+++ b/src/openai/types/chat/parsed_chat_completion.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Generic, TypeVar, Optional
+
+from ..._models import GenericModel
+from .chat_completion import Choice, ChatCompletion
+from .chat_completion_message import ChatCompletionMessage
+from .parsed_function_tool_call import ParsedFunctionToolCall
+
+__all__ = ["ParsedChatCompletion", "ParsedChoice"]
+
+
+ContentType = TypeVar("ContentType")
+
+
+# we need to disable this check because we're overriding properties
+# with subclasses of their types which is technically unsound as
+# properties can be mutated.
+# pyright: reportIncompatibleVariableOverride=false
+
+
+class ParsedChatCompletionMessage(ChatCompletionMessage, GenericModel, Generic[ContentType]):
+    parsed: Optional[ContentType] = None
+    """The auto-parsed message contents"""
+
+    tool_calls: Optional[List[ParsedFunctionToolCall]] = None  # type: ignore[assignment]
+    """The tool calls generated by the model, such as function calls."""
+
+
+class ParsedChoice(Choice, GenericModel, Generic[ContentType]):
+    message: ParsedChatCompletionMessage[ContentType]
+    """A chat completion message generated by the model."""
+
+
+class ParsedChatCompletion(ChatCompletion, GenericModel, Generic[ContentType]):
+    choices: List[ParsedChoice[ContentType]]  # type: ignore[assignment]
+    """A list of chat completion choices.
+
+    Can be more than one if `n` is greater than 1.
+    """
diff --git a/src/openai/types/chat/parsed_function_tool_call.py b/src/openai/types/chat/parsed_function_tool_call.py
new file mode 100644
index 0000000000..3e90789f85
--- /dev/null
+++ b/src/openai/types/chat/parsed_function_tool_call.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .chat_completion_message_tool_call import Function, ChatCompletionMessageToolCall
+
+__all__ = ["ParsedFunctionToolCall", "ParsedFunction"]
+
+# we need to disable this check because we're overriding properties
+# with subclasses of their types which is technically unsound as
+# properties can be mutated.
+# pyright: reportIncompatibleVariableOverride=false
+
+
+class ParsedFunction(Function):
+    parsed_arguments: Optional[object] = None
+    """
+    The arguments to call the function with.
+
+    If you used `openai.pydantic_function_tool()` then this will be an
+    instance of the given `BaseModel`.
+
+    Otherwise, this will be the parsed JSON arguments.
+    """
+
+
+class ParsedFunctionToolCall(ChatCompletionMessageToolCall):
+    function: ParsedFunction
+    """The function that the model called."""
diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py
index edb7b732bf..686f26b783 100644
--- a/src/openai/types/chat_model.py
+++ b/src/openai/types/chat_model.py
@@ -6,6 +6,7 @@
 
 ChatModel: TypeAlias = Literal[
     "gpt-4o",
+    "gpt-4o-2024-08-06",
     "gpt-4o-2024-05-13",
     "gpt-4o-mini",
     "gpt-4o-mini-2024-07-18",
diff --git a/src/openai/types/fine_tuning/job_create_params.py b/src/openai/types/fine_tuning/job_create_params.py
index c5196e4406..e9be2ef1ca 100644
--- a/src/openai/types/fine_tuning/job_create_params.py
+++ b/src/openai/types/fine_tuning/job_create_params.py
@@ -9,11 +9,11 @@
 
 
 class JobCreateParams(TypedDict, total=False):
-    model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]]]
+    model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]]]
     """The name of the model to fine-tune.
 
     You can select one of the
-    [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+    [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
     """
 
     training_file: Required[str]
@@ -54,7 +54,7 @@ class JobCreateParams(TypedDict, total=False):
     name.
 
     For example, a `suffix` of "custom-model-name" would produce a model name like
-    `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
+    `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
     """
 
     validation_file: Optional[str]
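
With `gpt-4o-mini` now listed as a fine-tunable base model, a job can be created against it directly. A short sketch (the training file ID is a placeholder):

from openai import OpenAI

client = OpenAI()

job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",
    training_file="file-placeholder",
)
print(job.id, job.status)
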
diff --git a/src/openai/types/shared/__init__.py b/src/openai/types/shared/__init__.py
index e085744e29..c8776bca0e 100644
--- a/src/openai/types/shared/__init__.py
+++ b/src/openai/types/shared/__init__.py
@@ -3,3 +3,6 @@
 from .error_object import ErrorObject as ErrorObject
 from .function_definition import FunctionDefinition as FunctionDefinition
 from .function_parameters import FunctionParameters as FunctionParameters
+from .response_format_text import ResponseFormatText as ResponseFormatText
+from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject
+from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema
diff --git a/src/openai/types/shared/function_definition.py b/src/openai/types/shared/function_definition.py
index 49f5e67c50..06baa23170 100644
--- a/src/openai/types/shared/function_definition.py
+++ b/src/openai/types/shared/function_definition.py
@@ -32,3 +32,12 @@ class FunctionDefinition(BaseModel):
 
     Omitting `parameters` defines a function with an empty parameter list.
     """
+
+    strict: Optional[bool] = None
+    """Whether to enable strict schema adherence when generating the function call.
+
+    If set to true, the model will follow the exact schema defined in the
+    `parameters` field. Only a subset of JSON Schema is supported when `strict` is
+    `true`. Learn more about Structured Outputs in the
+    [function calling guide](docs/guides/function-calling).
+    """
diff --git a/src/openai/types/shared/response_format_json_object.py b/src/openai/types/shared/response_format_json_object.py
new file mode 100644
index 0000000000..107728dd2e
--- /dev/null
+++ b/src/openai/types/shared/response_format_json_object.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatJSONObject"]
+
+
+class ResponseFormatJSONObject(BaseModel):
+    type: Literal["json_object"]
+    """The type of response format being defined: `json_object`"""
diff --git a/src/openai/types/shared/response_format_json_schema.py b/src/openai/types/shared/response_format_json_schema.py
new file mode 100644
index 0000000000..3194a4fe91
--- /dev/null
+++ b/src/openai/types/shared/response_format_json_schema.py
@@ -0,0 +1,44 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatJSONSchema", "JSONSchema"]
+
+
+class JSONSchema(BaseModel):
+    name: str
+    """The name of the response format.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    description: Optional[str] = None
+    """
+    A description of what the response format is for, used by the model to determine
+    how to respond in the format.
+    """
+
+    schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None)
+    """The schema for the response format, described as a JSON Schema object."""
+
+    strict: Optional[bool] = None
+    """Whether to enable strict schema adherence when generating the output.
+
+    If set to true, the model will always follow the exact schema defined in the
+    `schema` field. Only a subset of JSON Schema is supported when `strict` is
+    `true`. To learn more, read the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+    """
+
+
+class ResponseFormatJSONSchema(BaseModel):
+    json_schema: JSONSchema
+
+    type: Literal["json_schema"]
+    """The type of response format being defined: `json_schema`"""
diff --git a/src/openai/types/shared/response_format_text.py b/src/openai/types/shared/response_format_text.py
new file mode 100644
index 0000000000..6721fe0973
--- /dev/null
+++ b/src/openai/types/shared/response_format_text.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFormatText"]
+
+
+class ResponseFormatText(BaseModel):
+    type: Literal["text"]
+    """The type of response format being defined: `text`"""
diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py
index ef638cb279..ab4057d59f 100644
--- a/src/openai/types/shared_params/__init__.py
+++ b/src/openai/types/shared_params/__init__.py
@@ -2,3 +2,6 @@
 
 from .function_definition import FunctionDefinition as FunctionDefinition
 from .function_parameters import FunctionParameters as FunctionParameters
+from .response_format_text import ResponseFormatText as ResponseFormatText
+from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject
+from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema
diff --git a/src/openai/types/shared_params/function_definition.py b/src/openai/types/shared_params/function_definition.py
index 29ccc548d4..f41392f154 100644
--- a/src/openai/types/shared_params/function_definition.py
+++ b/src/openai/types/shared_params/function_definition.py
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+from typing import Optional
 from typing_extensions import Required, TypedDict
 
 from ...types import shared_params
@@ -33,3 +34,12 @@ class FunctionDefinition(TypedDict, total=False):
 
     Omitting `parameters` defines a function with an empty parameter list.
     """
+
+    strict: Optional[bool]
+    """Whether to enable strict schema adherence when generating the function call.
+
+    If set to true, the model will follow the exact schema defined in the
+    `parameters` field. Only a subset of JSON Schema is supported when `strict` is
+    `true`. Learn more about Structured Outputs in the
+    [function calling guide](docs/guides/function-calling).
+    """
diff --git a/src/openai/types/shared_params/response_format_json_object.py b/src/openai/types/shared_params/response_format_json_object.py
new file mode 100644
index 0000000000..8419c6cb56
--- /dev/null
+++ b/src/openai/types/shared_params/response_format_json_object.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatJSONObject"]
+
+
+class ResponseFormatJSONObject(TypedDict, total=False):
+    type: Required[Literal["json_object"]]
+    """The type of response format being defined: `json_object`"""
diff --git a/src/openai/types/shared_params/response_format_json_schema.py b/src/openai/types/shared_params/response_format_json_schema.py
new file mode 100644
index 0000000000..4b60fae8ee
--- /dev/null
+++ b/src/openai/types/shared_params/response_format_json_schema.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatJSONSchema", "JSONSchema"]
+
+
+class JSONSchema(TypedDict, total=False):
+    name: Required[str]
+    """The name of the response format.
+
+    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
+    of 64.
+    """
+
+    description: str
+    """
+    A description of what the response format is for, used by the model to determine
+    how to respond in the format.
+    """
+
+    schema: Dict[str, object]
+    """The schema for the response format, described as a JSON Schema object."""
+
+    strict: Optional[bool]
+    """Whether to enable strict schema adherence when generating the output.
+
+    If set to true, the model will always follow the exact schema defined in the
+    `schema` field. Only a subset of JSON Schema is supported when `strict` is
+    `true`. To learn more, read the
+    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+    """
+
+
+class ResponseFormatJSONSchema(TypedDict, total=False):
+    json_schema: Required[JSONSchema]
+
+    type: Required[Literal["json_schema"]]
+    """The type of response format being defined: `json_schema`"""
diff --git a/src/openai/types/shared_params/response_format_text.py b/src/openai/types/shared_params/response_format_text.py
new file mode 100644
index 0000000000..5bec7fc503
--- /dev/null
+++ b/src/openai/types/shared_params/response_format_text.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseFormatText"]
+
+
+class ResponseFormatText(TypedDict, total=False):
+    type: Required[Literal["text"]]
+    """The type of response format being defined: `text`"""
diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py
index dd0ce9266e..fbd5ff0597 100644
--- a/tests/api_resources/beta/test_assistants.py
+++ b/tests/api_resources/beta/test_assistants.py
@@ -24,19 +24,19 @@ class TestAssistants:
     @parametrize
     def test_method_create(self, client: OpenAI) -> None:
         assistant = client.beta.assistants.create(
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
         assert_matches_type(Assistant, assistant, path=["response"])
 
     @parametrize
     def test_method_create_with_all_params(self, client: OpenAI) -> None:
         assistant = client.beta.assistants.create(
-            model="gpt-4-turbo",
-            description="string",
-            instructions="string",
+            model="gpt-4o",
+            description="description",
+            instructions="instructions",
             metadata={},
-            name="string",
-            response_format="none",
+            name="name",
+            response_format="auto",
             temperature=1,
             tool_resources={
                 "code_interpreter": {"file_ids": ["string", "string", "string"]},
@@ -59,7 +59,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create(self, client: OpenAI) -> None:
         response = client.beta.assistants.with_raw_response.create(
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
 
         assert response.is_closed is True
@@ -70,7 +70,7 @@ def test_raw_response_create(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create(self, client: OpenAI) -> None:
         with client.beta.assistants.with_streaming_response.create(
-            model="gpt-4-turbo",
+            model="gpt-4o",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -83,14 +83,14 @@ def test_streaming_response_create(self, client: OpenAI) -> None:
     @parametrize
     def test_method_retrieve(self, client: OpenAI) -> None:
         assistant = client.beta.assistants.retrieve(
-            "string",
+            "assistant_id",
         )
         assert_matches_type(Assistant, assistant, path=["response"])
 
     @parametrize
     def test_raw_response_retrieve(self, client: OpenAI) -> None:
         response = client.beta.assistants.with_raw_response.retrieve(
-            "string",
+            "assistant_id",
         )
 
         assert response.is_closed is True
@@ -101,7 +101,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: OpenAI) -> None:
         with client.beta.assistants.with_streaming_response.retrieve(
-            "string",
+            "assistant_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -121,20 +121,20 @@ def test_path_params_retrieve(self, client: OpenAI) -> None:
     @parametrize
     def test_method_update(self, client: OpenAI) -> None:
         assistant = client.beta.assistants.update(
-            "string",
+            assistant_id="assistant_id",
         )
         assert_matches_type(Assistant, assistant, path=["response"])
 
     @parametrize
     def test_method_update_with_all_params(self, client: OpenAI) -> None:
         assistant = client.beta.assistants.update(
-            "string",
-            description="string",
-            instructions="string",
+            assistant_id="assistant_id",
+            description="description",
+            instructions="instructions",
             metadata={},
-            model="string",
-            name="string",
-            response_format="none",
+            model="model",
+            name="name",
+            response_format="auto",
             temperature=1,
             tool_resources={
                 "code_interpreter": {"file_ids": ["string", "string", "string"]},
@@ -148,7 +148,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_update(self, client: OpenAI) -> None:
         response = client.beta.assistants.with_raw_response.update(
-            "string",
+            assistant_id="assistant_id",
         )
 
         assert response.is_closed is True
@@ -159,7 +159,7 @@ def test_raw_response_update(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_update(self, client: OpenAI) -> None:
         with client.beta.assistants.with_streaming_response.update(
-            "string",
+            assistant_id="assistant_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -173,7 +173,7 @@ def test_streaming_response_update(self, client: OpenAI) -> None:
     def test_path_params_update(self, client: OpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
             client.beta.assistants.with_raw_response.update(
-                "",
+                assistant_id="",
             )
 
     @parametrize
@@ -184,8 +184,8 @@ def test_method_list(self, client: OpenAI) -> None:
     @parametrize
     def test_method_list_with_all_params(self, client: OpenAI) -> None:
         assistant = client.beta.assistants.list(
-            after="string",
-            before="string",
+            after="after",
+            before="before",
             limit=0,
             order="asc",
         )
@@ -214,14 +214,14 @@ def test_streaming_response_list(self, client: OpenAI) -> None:
     @parametrize
     def test_method_delete(self, client: OpenAI) -> None:
         assistant = client.beta.assistants.delete(
-            "string",
+            "assistant_id",
         )
         assert_matches_type(AssistantDeleted, assistant, path=["response"])
 
     @parametrize
     def test_raw_response_delete(self, client: OpenAI) -> None:
         response = client.beta.assistants.with_raw_response.delete(
-            "string",
+            "assistant_id",
         )
 
         assert response.is_closed is True
@@ -232,7 +232,7 @@ def test_raw_response_delete(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: OpenAI) -> None:
         with client.beta.assistants.with_streaming_response.delete(
-            "string",
+            "assistant_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -256,19 +256,19 @@ class TestAsyncAssistants:
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
         assistant = await async_client.beta.assistants.create(
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
         assert_matches_type(Assistant, assistant, path=["response"])
 
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
         assistant = await async_client.beta.assistants.create(
-            model="gpt-4-turbo",
-            description="string",
-            instructions="string",
+            model="gpt-4o",
+            description="description",
+            instructions="instructions",
             metadata={},
-            name="string",
-            response_format="none",
+            name="name",
+            response_format="auto",
             temperature=1,
             tool_resources={
                 "code_interpreter": {"file_ids": ["string", "string", "string"]},
@@ -291,7 +291,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.beta.assistants.with_raw_response.create(
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
 
         assert response.is_closed is True
@@ -302,7 +302,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
         async with async_client.beta.assistants.with_streaming_response.create(
-            model="gpt-4-turbo",
+            model="gpt-4o",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -315,14 +315,14 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> Non
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
         assistant = await async_client.beta.assistants.retrieve(
-            "string",
+            "assistant_id",
         )
         assert_matches_type(Assistant, assistant, path=["response"])
 
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.beta.assistants.with_raw_response.retrieve(
-            "string",
+            "assistant_id",
         )
 
         assert response.is_closed is True
@@ -333,7 +333,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
         async with async_client.beta.assistants.with_streaming_response.retrieve(
-            "string",
+            "assistant_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -353,20 +353,20 @@ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_update(self, async_client: AsyncOpenAI) -> None:
         assistant = await async_client.beta.assistants.update(
-            "string",
+            assistant_id="assistant_id",
         )
         assert_matches_type(Assistant, assistant, path=["response"])
 
     @parametrize
     async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
         assistant = await async_client.beta.assistants.update(
-            "string",
-            description="string",
-            instructions="string",
+            assistant_id="assistant_id",
+            description="description",
+            instructions="instructions",
             metadata={},
-            model="string",
-            name="string",
-            response_format="none",
+            model="model",
+            name="name",
+            response_format="auto",
             temperature=1,
             tool_resources={
                 "code_interpreter": {"file_ids": ["string", "string", "string"]},
@@ -380,7 +380,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) ->
     @parametrize
     async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.beta.assistants.with_raw_response.update(
-            "string",
+            assistant_id="assistant_id",
         )
 
         assert response.is_closed is True
@@ -391,7 +391,7 @@ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
         async with async_client.beta.assistants.with_streaming_response.update(
-            "string",
+            assistant_id="assistant_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -405,7 +405,7 @@ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> Non
     async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
         with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
             await async_client.beta.assistants.with_raw_response.update(
-                "",
+                assistant_id="",
             )
 
     @parametrize
@@ -416,8 +416,8 @@ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
         assistant = await async_client.beta.assistants.list(
-            after="string",
-            before="string",
+            after="after",
+            before="before",
             limit=0,
             order="asc",
         )
@@ -446,14 +446,14 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
         assistant = await async_client.beta.assistants.delete(
-            "string",
+            "assistant_id",
         )
         assert_matches_type(AssistantDeleted, assistant, path=["response"])
 
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.beta.assistants.with_raw_response.delete(
-            "string",
+            "assistant_id",
         )
 
         assert response.is_closed is True
@@ -464,7 +464,7 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
         async with async_client.beta.assistants.with_streaming_response.delete(
-            "string",
+            "assistant_id",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py
index 9e06b597ef..67fff736dd 100644
--- a/tests/api_resources/beta/test_threads.py
+++ b/tests/api_resources/beta/test_threads.py
@@ -302,9 +302,9 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI)
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             stream=False,
             temperature=1,
             thread={
@@ -473,9 +473,9 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI)
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             temperature=1,
             thread={
                 "messages": [
@@ -912,9 +912,9 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             stream=False,
             temperature=1,
             thread={
@@ -1083,9 +1083,9 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             temperature=1,
             thread={
                 "messages": [
diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py
index 26862ef1eb..e21c6c2c77 100644
--- a/tests/api_resources/beta/threads/test_runs.py
+++ b/tests/api_resources/beta/threads/test_runs.py
@@ -135,9 +135,9 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             stream=False,
             temperature=1,
             tool_choice="none",
@@ -299,9 +299,9 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             temperature=1,
             tool_choice="none",
             tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
@@ -801,9 +801,9 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             stream=False,
             temperature=1,
             tool_choice="none",
@@ -965,9 +965,9 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             max_completion_tokens=256,
             max_prompt_tokens=256,
             metadata={},
-            model="gpt-4-turbo",
+            model="gpt-4o",
             parallel_tool_calls=True,
-            response_format="none",
+            response_format="auto",
             temperature=1,
             tool_choice="none",
             tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}],
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index 5cb2a8c717..d744dfe6ea 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -28,7 +28,7 @@ def test_method_create_overload_1(self, client: OpenAI) -> None:
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
         assert_matches_type(ChatCompletion, completion, path=["response"])
 
@@ -42,7 +42,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
                     "name": "string",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             frequency_penalty=-2,
             function_call="none",
             functions=[
@@ -58,7 +58,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
-            response_format={"type": "json_object"},
+            response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
@@ -73,6 +73,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -81,6 +82,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -89,6 +91,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
             ],
@@ -107,7 +110,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
 
         assert response.is_closed is True
@@ -124,7 +127,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -143,7 +146,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         )
         completion_stream.response.close()
@@ -158,7 +161,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
                     "name": "string",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
             frequency_penalty=-2,
             function_call="none",
@@ -175,7 +178,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
-            response_format={"type": "json_object"},
+            response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
@@ -189,6 +192,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -197,6 +201,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -205,6 +210,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
             ],
@@ -223,7 +229,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         )
 
@@ -240,7 +246,7 @@ def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         ) as response:
             assert not response.is_closed
@@ -264,7 +270,7 @@ async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
         assert_matches_type(ChatCompletion, completion, path=["response"])
 
@@ -278,7 +284,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
                     "name": "string",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             frequency_penalty=-2,
             function_call="none",
             functions=[
@@ -294,7 +300,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
-            response_format={"type": "json_object"},
+            response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
@@ -309,6 +315,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -317,6 +324,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -325,6 +333,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
             ],
@@ -343,7 +352,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
 
         assert response.is_closed is True
@@ -360,7 +369,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -379,7 +388,7 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         )
         await completion_stream.response.aclose()
@@ -394,7 +403,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
                     "name": "string",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
             frequency_penalty=-2,
             function_call="none",
@@ -411,7 +420,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             n=1,
             parallel_tool_calls=True,
             presence_penalty=-2,
-            response_format={"type": "json_object"},
+            response_format={"type": "text"},
             seed=-9007199254740991,
             service_tier="auto",
             stop="string",
@@ -425,6 +434,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -433,6 +443,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
                 {
@@ -441,6 +452,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
                         "description": "string",
                         "name": "string",
                         "parameters": {"foo": "bar"},
+                        "strict": True,
                     },
                 },
             ],
@@ -459,7 +471,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         )
 
@@ -476,7 +488,7 @@ async def test_streaming_response_create_overload_2(self, async_client: AsyncOpe
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
             stream=True,
         ) as response:
             assert not response.is_closed
diff --git a/tests/api_resources/fine_tuning/test_jobs.py b/tests/api_resources/fine_tuning/test_jobs.py
index 1ff6d63b31..68b3d73ac5 100644
--- a/tests/api_resources/fine_tuning/test_jobs.py
+++ b/tests/api_resources/fine_tuning/test_jobs.py
@@ -24,7 +24,7 @@ class TestJobs:
     @parametrize
     def test_method_create(self, client: OpenAI) -> None:
         job = client.fine_tuning.jobs.create(
-            model="gpt-3.5-turbo",
+            model="gpt-4o-mini",
             training_file="file-abc123",
         )
         assert_matches_type(FineTuningJob, job, path=["response"])
@@ -32,7 +32,7 @@ def test_method_create(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_with_all_params(self, client: OpenAI) -> None:
         job = client.fine_tuning.jobs.create(
-            model="gpt-3.5-turbo",
+            model="gpt-4o-mini",
             training_file="file-abc123",
             hyperparameters={
                 "batch_size": "auto",
@@ -77,7 +77,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create(self, client: OpenAI) -> None:
         response = client.fine_tuning.jobs.with_raw_response.create(
-            model="gpt-3.5-turbo",
+            model="gpt-4o-mini",
             training_file="file-abc123",
         )
 
@@ -89,7 +89,7 @@ def test_raw_response_create(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create(self, client: OpenAI) -> None:
         with client.fine_tuning.jobs.with_streaming_response.create(
-            model="gpt-3.5-turbo",
+            model="gpt-4o-mini",
             training_file="file-abc123",
         ) as response:
             assert not response.is_closed
@@ -263,7 +263,7 @@ class TestAsyncJobs:
     @parametrize
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
         job = await async_client.fine_tuning.jobs.create(
-            model="gpt-3.5-turbo",
+            model="gpt-4o-mini",
             training_file="file-abc123",
         )
         assert_matches_type(FineTuningJob, job, path=["response"])
@@ -271,7 +271,7 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
         job = await async_client.fine_tuning.jobs.create(
-            model="gpt-3.5-turbo",
+            model="gpt-4o-mini",
             training_file="file-abc123",
             hyperparameters={
                 "batch_size": "auto",
@@ -316,7 +316,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.fine_tuning.jobs.with_raw_response.create(
-            model="gpt-3.5-turbo",
+            model="gpt-4o-mini",
             training_file="file-abc123",
         )
 
@@ -328,7 +328,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
         async with async_client.fine_tuning.jobs.with_streaming_response.create(
-            model="gpt-3.5-turbo",
+            model="gpt-4o-mini",
             training_file="file-abc123",
         ) as response:
             assert not response.is_closed
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index 71f8e5834b..8791507c3e 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -21,14 +21,14 @@ class TestModels:
     @parametrize
     def test_method_retrieve(self, client: OpenAI) -> None:
         model = client.models.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         )
         assert_matches_type(Model, model, path=["response"])
 
     @parametrize
     def test_raw_response_retrieve(self, client: OpenAI) -> None:
         response = client.models.with_raw_response.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         )
 
         assert response.is_closed is True
@@ -39,7 +39,7 @@ def test_raw_response_retrieve(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_retrieve(self, client: OpenAI) -> None:
         with client.models.with_streaming_response.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -84,14 +84,14 @@ def test_streaming_response_list(self, client: OpenAI) -> None:
     @parametrize
     def test_method_delete(self, client: OpenAI) -> None:
         model = client.models.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
         )
         assert_matches_type(ModelDeleted, model, path=["response"])
 
     @parametrize
     def test_raw_response_delete(self, client: OpenAI) -> None:
         response = client.models.with_raw_response.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
         )
 
         assert response.is_closed is True
@@ -102,7 +102,7 @@ def test_raw_response_delete(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_delete(self, client: OpenAI) -> None:
         with client.models.with_streaming_response.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -126,14 +126,14 @@ class TestAsyncModels:
     @parametrize
     async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
         model = await async_client.models.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         )
         assert_matches_type(Model, model, path=["response"])
 
     @parametrize
     async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.models.with_raw_response.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         )
 
         assert response.is_closed is True
@@ -144,7 +144,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
         async with async_client.models.with_streaming_response.retrieve(
-            "gpt-3.5-turbo",
+            "gpt-4o-mini",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -189,14 +189,14 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
         model = await async_client.models.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
         )
         assert_matches_type(ModelDeleted, model, path=["response"])
 
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.models.with_raw_response.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
         )
 
         assert response.is_closed is True
@@ -207,7 +207,7 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
         async with async_client.models.with_streaming_response.delete(
-            "ft:gpt-3.5-turbo:acemeco:suffix:abc123",
+            "ft:gpt-4o-mini:acemeco:suffix:abc123",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/lib/__init__.py b/tests/lib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/lib/chat/__init__.py b/tests/lib/chat/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/lib/chat/_utils.py b/tests/lib/chat/_utils.py
new file mode 100644
index 0000000000..dcc32b17fd
--- /dev/null
+++ b/tests/lib/chat/_utils.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import io
+import inspect
+from typing import Any, Iterable
+from typing_extensions import TypeAlias
+
+import rich
+import pytest
+import pydantic
+
+ReprArgs: TypeAlias = "Iterable[tuple[str | None, Any]]"
+
+
+def print_obj(obj: object, monkeypatch: pytest.MonkeyPatch) -> str:
+    """Pretty print an object to a string"""
+
+    # monkeypatch pydantic model printing so that model fields
+    # are always printed in the same order so we can reliably
+    # use this for snapshot tests
+    original_repr = pydantic.BaseModel.__repr_args__
+
+    def __repr_args__(self: pydantic.BaseModel) -> ReprArgs:
+        return sorted(original_repr(self), key=lambda arg: arg[0] or "")
+
+    with monkeypatch.context() as m:
+        m.setattr(pydantic.BaseModel, "__repr_args__", __repr_args__)
+
+        buf = io.StringIO()
+
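+        # a fixed console width keeps rich's line wrapping deterministic for snapshots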
+        console = rich.console.Console(file=buf, width=120)
+        console.print(obj)
+
+        string = buf.getvalue()
+
+        # we remove all `fn_name.<locals>.` occurrences
+        # so that we can share the same snapshots between
+        # pydantic v1 and pydantic v2 as their output for
+        # generic models differs, e.g.
+        #
+        # v2: `ParsedChatCompletion[test_parse_pydantic_model.<locals>.Location]`
+        # v1: `ParsedChatCompletion[Location]`
+        return clear_locals(string, stacklevel=2)
+
+
+def get_caller_name(*, stacklevel: int = 1) -> str:
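+    """Return the name of the function `stacklevel` frames up the call stack."""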
+    frame = inspect.currentframe()
+    assert frame is not None
+
+    for i in range(stacklevel):
+        frame = frame.f_back
+        assert frame is not None, f"no {i}th frame"
+
+    return frame.f_code.co_name
+
+
+def clear_locals(string: str, *, stacklevel: int) -> str:
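+    """Strip the `<caller>.<locals>.` prefix so snapshots match between pydantic v1 and v2."""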
+    caller = get_caller_name(stacklevel=stacklevel + 1)
+    return string.replace(f"{caller}.<locals>.", "")
diff --git a/tests/lib/chat/test_completions.py b/tests/lib/chat/test_completions.py
new file mode 100644
index 0000000000..db370e4332
--- /dev/null
+++ b/tests/lib/chat/test_completions.py
@@ -0,0 +1,633 @@
+from __future__ import annotations
+
+import os
+import json
+from typing import Any, Callable
+from typing_extensions import Literal, TypeVar
+
+import httpx
+import pytest
+from respx import MockRouter
+from pydantic import BaseModel
+from inline_snapshot import snapshot
+
+import openai
+from openai import OpenAI, AsyncOpenAI
+from openai._utils import assert_signatures_in_sync
+
+from ._utils import print_obj
+from ...conftest import base_url
+from ..schema_types.query import Query
+
+_T = TypeVar("_T")
+
+# all the snapshots in this file are auto-generated from the live API
+#
+# you can update them with
+#
+# `OPENAI_LIVE=1 pytest --inline-snapshot=fix`
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    completion = _make_snapshot_request(
+        lambda c: c.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in SF?",
+                },
+            ],
+        ),
+        content_snapshot=snapshot(
+            '{"id": "chatcmpl-9tABLlmqdEOYnmmWATUI3dNKlfXa3", "object": "chat.completion", "created": 1722934207, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "I\'m unable to provide real-time weather updates. For the current weather in San Francisco, I recommend checking a reliable weather website or app.", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 14, "completion_tokens": 27, "total_tokens": 41}, "system_fingerprint": "fp_e1a05a1dce"}'
+        ),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(completion, monkeypatch) == snapshot(
+        """\
+ParsedChatCompletion[NoneType](
+    choices=[
+        ParsedChoice[NoneType](
+            finish_reason='stop',
+            index=0,
+            logprobs=None,
+            message=ParsedChatCompletionMessage[NoneType](
+                content="I'm unable to provide real-time weather updates. For the current weather in San Francisco, I 
+recommend checking a reliable weather website or app.",
+                function_call=None,
+                parsed=None,
+                refusal=None,
+                role='assistant',
+                tool_calls=[]
+            )
+        )
+    ],
+    created=1722934207,
+    id='chatcmpl-9tABLlmqdEOYnmmWATUI3dNKlfXa3',
+    model='gpt-4o-2024-08-06',
+    object='chat.completion',
+    service_tier=None,
+    system_fingerprint='fp_e1a05a1dce',
+    usage=CompletionUsage(completion_tokens=27, prompt_tokens=14, total_tokens=41)
+)
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    class Location(BaseModel):
+        city: str
+        temperature: float
+        units: Literal["c", "f"]
+
+    completion = _make_snapshot_request(
+        lambda c: c.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in SF?",
+                },
+            ],
+            response_format=Location,
+        ),
+        content_snapshot=snapshot(
+            '{"id": "chatcmpl-9tABUwdw3Kbe3VPRnMofh9lJkFkLV", "object": "chat.completion", "created": 1722934216, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 17, "completion_tokens": 14, "total_tokens": 31}, "system_fingerprint": "fp_e1a05a1dce"}'
+        ),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(completion, monkeypatch) == snapshot(
+        """\
+ParsedChatCompletion[Location](
+    choices=[
+        ParsedChoice[Location](
+            finish_reason='stop',
+            index=0,
+            logprobs=None,
+            message=ParsedChatCompletionMessage[Location](
+                content='{"city":"San Francisco","temperature":65,"units":"f"}',
+                function_call=None,
+                parsed=Location(city='San Francisco', temperature=65.0, units='f'),
+                refusal=None,
+                role='assistant',
+                tool_calls=[]
+            )
+        )
+    ],
+    created=1722934216,
+    id='chatcmpl-9tABUwdw3Kbe3VPRnMofh9lJkFkLV',
+    model='gpt-4o-2024-08-06',
+    object='chat.completion',
+    service_tier=None,
+    system_fingerprint='fp_e1a05a1dce',
+    usage=CompletionUsage(completion_tokens=14, prompt_tokens=17, total_tokens=31)
+)
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model_multiple_choices(
+    client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
+) -> None:
+    class Location(BaseModel):
+        city: str
+        temperature: float
+        units: Literal["c", "f"]
+
+    completion = _make_snapshot_request(
+        lambda c: c.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in SF?",
+                },
+            ],
+            n=3,
+            response_format=Location,
+        ),
+        content_snapshot=snapshot(
+            '{"id": "chatcmpl-9tABVfBu4ZdyQFKe8RgsWsyL7UoIj", "object": "chat.completion", "created": 1722934217, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":58.0,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}, {"index": 1, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":61,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}, {"index": 2, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 17, "completion_tokens": 44, "total_tokens": 61}, "system_fingerprint": "fp_e1a05a1dce"}'
+        ),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(completion.choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[Location](
+        finish_reason='stop',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[Location](
+            content='{"city":"San Francisco","temperature":58.0,"units":"f"}',
+            function_call=None,
+            parsed=Location(city='San Francisco', temperature=58.0, units='f'),
+            refusal=None,
+            role='assistant',
+            tool_calls=[]
+        )
+    ),
+    ParsedChoice[Location](
+        finish_reason='stop',
+        index=1,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[Location](
+            content='{"city":"San Francisco","temperature":61,"units":"f"}',
+            function_call=None,
+            parsed=Location(city='San Francisco', temperature=61.0, units='f'),
+            refusal=None,
+            role='assistant',
+            tool_calls=[]
+        )
+    ),
+    ParsedChoice[Location](
+        finish_reason='stop',
+        index=2,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[Location](
+            content='{"city":"San Francisco","temperature":65,"units":"f"}',
+            function_call=None,
+            parsed=Location(city='San Francisco', temperature=65.0, units='f'),
+            refusal=None,
+            role='assistant',
+            tool_calls=[]
+        )
+    )
+]
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_pydantic_tool_model_all_types(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    completion = _make_snapshot_request(
+        lambda c: c.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "look up all my orders in may of last year that were fulfilled but not delivered on time",
+                },
+            ],
+            tools=[openai.pydantic_function_tool(Query)],
+            response_format=Query,
+        ),
+        content_snapshot=snapshot(
+            '{"id": "chatcmpl-9tABVRLORZbby5zZjZhyrUdDU1XhB", "object": "chat.completion", "created": 1722934217, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_VcgQcA1C047fQnXDG0PQXG7O", "type": "function", "function": {"name": "Query", "arguments": "{\\"table_name\\":\\"orders\\",\\"columns\\":[\\"id\\",\\"status\\",\\"expected_delivery_date\\",\\"delivered_at\\"],\\"conditions\\":[{\\"column\\":\\"ordered_at\\",\\"operator\\":\\"=\\",\\"value\\":\\"2022-05\\"},{\\"column\\":\\"status\\",\\"operator\\":\\"=\\",\\"value\\":\\"fulfilled\\"},{\\"column\\":\\"delivered_at\\",\\"operator\\":\\">\\",\\"value\\":{\\"column_name\\":\\"expected_delivery_date\\"}}],\\"order_by\\":\\"asc\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 195, "completion_tokens": 85, "total_tokens": 280}, "system_fingerprint": "fp_e1a05a1dce"}'
+        ),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(completion.choices[0], monkeypatch) == snapshot(
+        """\
+ParsedChoice[Query](
+    finish_reason='tool_calls',
+    index=0,
+    logprobs=None,
+    message=ParsedChatCompletionMessage[Query](
+        content=None,
+        function_call=None,
+        parsed=None,
+        refusal=None,
+        role='assistant',
+        tool_calls=[
+            ParsedFunctionToolCall(
+                function=ParsedFunction(
+                    arguments='{"table_name":"orders","columns":["id","status","expected_delivery_date","delivered_at"],
+"conditions":[{"column":"ordered_at","operator":"=","value":"2022-05"},{"column":"status","operator":"=","value":"fulfil
+led"},{"column":"delivered_at","operator":">","value":{"column_name":"expected_delivery_date"}}],"order_by":"asc"}',
+                    name='Query',
+                    parsed_arguments=Query(
+                        columns=[
+                            <Column.id: 'id'>,
+                            <Column.status: 'status'>,
+                            <Column.expected_delivery_date: 'expected_delivery_date'>,
+                            <Column.delivered_at: 'delivered_at'>
+                        ],
+                        conditions=[
+                            Condition(column='ordered_at', operator=<Operator.eq: '='>, value='2022-05'),
+                            Condition(column='status', operator=<Operator.eq: '='>, value='fulfilled'),
+                            Condition(
+                                column='delivered_at',
+                                operator=<Operator.gt: '>'>,
+                                value=DynamicValue(column_name='expected_delivery_date')
+                            )
+                        ],
+                        order_by=<OrderBy.asc: 'asc'>,
+                        table_name=<Table.orders: 'orders'>
+                    )
+                ),
+                id='call_VcgQcA1C047fQnXDG0PQXG7O',
+                type='function'
+            )
+        ]
+    )
+)
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_max_tokens_reached(client: OpenAI, respx_mock: MockRouter) -> None:
+    class Location(BaseModel):
+        city: str
+        temperature: float
+        units: Literal["c", "f"]
+
+    with pytest.raises(openai.LengthFinishReasonError):
+        _make_snapshot_request(
+            lambda c: c.beta.chat.completions.parse(
+                model="gpt-4o-2024-08-06",
+                messages=[
+                    {
+                        "role": "user",
+                        "content": "What's the weather like in SF?",
+                    },
+                ],
+                max_tokens=1,
+                response_format=Location,
+            ),
+            content_snapshot=snapshot(
+                '{"id": "chatcmpl-9tABXbi3qast6oJvdaqQcK9C7k9fn", "object": "chat.completion", "created": 1722934219, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"", "refusal": null}, "logprobs": null, "finish_reason": "length"}], "usage": {"prompt_tokens": 17, "completion_tokens": 1, "total_tokens": 18}, "system_fingerprint": "fp_e1a05a1dce"}'
+            ),
+            mock_client=client,
+            respx_mock=respx_mock,
+        )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model_refusal(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    class Location(BaseModel):
+        city: str
+        temperature: float
+        units: Literal["c", "f"]
+
+    completion = _make_snapshot_request(
+        lambda c: c.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "How do I make anthrax?",
+                },
+            ],
+            response_format=Location,
+        ),
+        content_snapshot=snapshot(
+            '{"id": "chatcmpl-9tABXJEffhEWxp24MeLxkDJCMtWmx", "object": "chat.completion", "created": 1722934219, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "refusal": "I\'m very sorry, but I can\'t assist with that."}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 17, "completion_tokens": 12, "total_tokens": 29}, "system_fingerprint": "fp_e1a05a1dce"}'
+        ),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(completion.choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[Location](
+        finish_reason='stop',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[Location](
+            content=None,
+            function_call=None,
+            parsed=None,
+            refusal="I'm very sorry, but I can't assist with that.",
+            role='assistant',
+            tool_calls=[]
+        )
+    )
+]
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_tool(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    class GetWeatherArgs(BaseModel):
+        city: str
+        country: str
+        units: Literal["c", "f"] = "c"
+
+    completion = _make_snapshot_request(
+        lambda c: c.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in Edinburgh?",
+                },
+            ],
+            tools=[
+                openai.pydantic_function_tool(GetWeatherArgs),
+            ],
+        ),
+        content_snapshot=snapshot(
+            '{"id": "chatcmpl-9tABgtKnF7Gbri4CmpOocmhg0UgBF", "object": "chat.completion", "created": 1722934228, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_9rqjEc1DQRADTYGVV45LbZwL", "type": "function", "function": {"name": "GetWeatherArgs", "arguments": "{\\"city\\":\\"Edinburgh\\",\\"country\\":\\"UK\\",\\"units\\":\\"c\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 76, "completion_tokens": 24, "total_tokens": 100}, "system_fingerprint": "fp_e1a05a1dce"}'
+        ),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(completion.choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[NoneType](
+        finish_reason='tool_calls',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[NoneType](
+            content=None,
+            function_call=None,
+            parsed=None,
+            refusal=None,
+            role='assistant',
+            tool_calls=[
+                ParsedFunctionToolCall(
+                    function=ParsedFunction(
+                        arguments='{"city":"Edinburgh","country":"UK","units":"c"}',
+                        name='GetWeatherArgs',
+                        parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
+                    ),
+                    id='call_9rqjEc1DQRADTYGVV45LbZwL',
+                    type='function'
+                )
+            ]
+        )
+    )
+]
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_multiple_pydantic_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    class GetWeatherArgs(BaseModel):
+        """Get the temperature for the given country/city combo"""
+
+        city: str
+        country: str
+        units: Literal["c", "f"] = "c"
+
+    class GetStockPrice(BaseModel):
+        ticker: str
+        exchange: str
+
+    completion = _make_snapshot_request(
+        lambda c: c.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in Edinburgh?",
+                },
+                {
+                    "role": "user",
+                    "content": "What's the price of AAPL?",
+                },
+            ],
+            tools=[
+                openai.pydantic_function_tool(GetWeatherArgs),
+                openai.pydantic_function_tool(
+                    GetStockPrice, name="get_stock_price", description="Fetch the latest price for a given ticker"
+                ),
+            ],
+        ),
+        content_snapshot=snapshot(
+            '{"id": "chatcmpl-9tABqDpvDTi0Cg8PHtKdNSFoh4UJv", "object": "chat.completion", "created": 1722934238, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_Yeg67XmQbMcohm3NGj0g12ty", "type": "function", "function": {"name": "GetWeatherArgs", "arguments": "{\\"city\\": \\"Edinburgh\\", \\"country\\": \\"GB\\", \\"units\\": \\"c\\"}"}}, {"id": "call_OGg3UZC2ksjAg7yrLXy8t1MO", "type": "function", "function": {"name": "get_stock_price", "arguments": "{\\"ticker\\": \\"AAPL\\", \\"exchange\\": \\"NASDAQ\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 149, "completion_tokens": 60, "total_tokens": 209}, "system_fingerprint": "fp_e1a05a1dce"}'
+        ),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(completion.choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[NoneType](
+        finish_reason='tool_calls',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[NoneType](
+            content=None,
+            function_call=None,
+            parsed=None,
+            refusal=None,
+            role='assistant',
+            tool_calls=[
+                ParsedFunctionToolCall(
+                    function=ParsedFunction(
+                        arguments='{"city": "Edinburgh", "country": "GB", "units": "c"}',
+                        name='GetWeatherArgs',
+                        parsed_arguments=GetWeatherArgs(city='Edinburgh', country='GB', units='c')
+                    ),
+                    id='call_Yeg67XmQbMcohm3NGj0g12ty',
+                    type='function'
+                ),
+                ParsedFunctionToolCall(
+                    function=ParsedFunction(
+                        arguments='{"ticker": "AAPL", "exchange": "NASDAQ"}',
+                        name='get_stock_price',
+                        parsed_arguments=GetStockPrice(exchange='NASDAQ', ticker='AAPL')
+                    ),
+                    id='call_OGg3UZC2ksjAg7yrLXy8t1MO',
+                    type='function'
+                )
+            ]
+        )
+    )
+]
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    completion = _make_snapshot_request(
+        lambda c: c.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in SF?",
+                },
+            ],
+            tools=[
+                {
+                    "type": "function",
+                    "function": {
+                        "name": "get_weather",
+                        "parameters": {
+                            "type": "object",
+                            "properties": {
+                                "city": {"type": "string"},
+                                "state": {"type": "string"},
+                            },
+                            "required": [
+                                "city",
+                                "state",
+                            ],
+                            "additionalProperties": False,
+                        },
+                        "strict": True,
+                    },
+                }
+            ],
+        ),
+        content_snapshot=snapshot(
+            '{"id": "chatcmpl-9tAC0vDx3MfupXmsduSZavLVaLcrA", "object": "chat.completion", "created": 1722934248, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_iNznvWR4R81mizFFHjgh7o4i", "type": "function", "function": {"name": "get_weather", "arguments": "{\\"city\\":\\"San Francisco\\",\\"state\\":\\"CA\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 48, "completion_tokens": 19, "total_tokens": 67}, "system_fingerprint": "fp_e1a05a1dce"}'
+        ),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(completion.choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[NoneType](
+        finish_reason='tool_calls',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[NoneType](
+            content=None,
+            function_call=None,
+            parsed=None,
+            refusal=None,
+            role='assistant',
+            tool_calls=[
+                ParsedFunctionToolCall(
+                    function=ParsedFunction(
+                        arguments='{"city":"San Francisco","state":"CA"}',
+                        name='get_weather',
+                        parsed_arguments={'city': 'San Francisco', 'state': 'CA'}
+                    ),
+                    id='call_iNznvWR4R81mizFFHjgh7o4i',
+                    type='function'
+                )
+            ]
+        )
+    )
+]
+"""
+    )
+
+
+def test_parse_non_strict_tools(client: OpenAI) -> None:
+    with pytest.raises(
+        ValueError, match="`get_weather` is not strict. Only `strict` function tools can be auto-parsed"
+    ):
+        client.beta.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[],
+            tools=[
+                {
+                    "type": "function",
+                    "function": {
+                        "name": "get_weather",
+                        "parameters": {},
+                    },
+                }
+            ],
+        )
+
+
+@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
+def test_parse_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
+    checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
+
+    assert_signatures_in_sync(
+        checking_client.chat.completions.create,
+        checking_client.beta.chat.completions.parse,
+        exclude_params={"response_format", "stream"},
+    )
+
+
+def _make_snapshot_request(
+    func: Callable[[OpenAI], _T],
+    *,
+    content_snapshot: Any,
+    respx_mock: MockRouter,
+    mock_client: OpenAI,
+) -> _T:
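+    """Run `func` against either the live API or a `respx` mock.
+
+    With `OPENAI_LIVE=1` the request hits the real API and the JSON response body
+    is asserted against `content_snapshot` (so `--inline-snapshot=fix` can update it);
+    otherwise the stored snapshot is replayed through `respx_mock`.
+    """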
+    live = os.environ.get("OPENAI_LIVE") == "1"
+    if live:
+
+        def _on_response(response: httpx.Response) -> None:
+            # update the content snapshot
+            assert json.dumps(json.loads(response.read())) == content_snapshot
+
+        respx_mock.stop()
+
+        client = OpenAI(
+            http_client=httpx.Client(
+                event_hooks={
+                    "response": [_on_response],
+                }
+            )
+        )
+    else:
+        respx_mock.post("/chat/completions").mock(
+            return_value=httpx.Response(
+                200,
+                content=content_snapshot._old_value,
+                headers={"content-type": "application/json"},
+            )
+        )
+
+        client = mock_client
+
+    result = func(client)
+
+    if live:
+        client.close()
+
+    return result
diff --git a/tests/lib/chat/test_completions_streaming.py b/tests/lib/chat/test_completions_streaming.py
new file mode 100644
index 0000000000..3aaa9a0f38
--- /dev/null
+++ b/tests/lib/chat/test_completions_streaming.py
@@ -0,0 +1,1047 @@
+from __future__ import annotations
+
+import os
+from typing import Any, Generic, Callable, Iterator, cast, overload
+from typing_extensions import Literal, TypeVar
+
+import rich
+import httpx
+import pytest
+from respx import MockRouter
+from pydantic import BaseModel
+from inline_snapshot import external, snapshot, outsource
+
+import openai
+from openai import OpenAI, AsyncOpenAI
+from openai._utils import assert_signatures_in_sync
+from openai._compat import model_copy
+from openai.lib.streaming.chat import (
+    ContentDoneEvent,
+    ChatCompletionStream,
+    ChatCompletionStreamEvent,
+    ChatCompletionStreamManager,
+    ParsedChatCompletionSnapshot,
+)
+from openai.lib._parsing._completions import ResponseFormatT
+
+from ._utils import print_obj
+from ...conftest import base_url
+
+_T = TypeVar("_T")
+
+# all the snapshots in this file are auto-generated from the live API
+#
+# you can update them with
+#
+# `OPENAI_LIVE=1 pytest --inline-snapshot=fix`
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    listener = _make_stream_snapshot_request(
+        lambda c: c.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in SF?",
+                },
+            ],
+        ),
+        content_snapshot=snapshot(external("b9d6bee9f9b8*.bin")),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[NoneType](
+        finish_reason='stop',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[NoneType](
+            content="I'm unable to provide real-time weather updates. To get the latest weather information for San 
+Francisco, I recommend checking a reliable weather website or using a weather app.",
+            function_call=None,
+            parsed=None,
+            refusal=None,
+            role='assistant',
+            tool_calls=[]
+        )
+    )
+]
+"""
+    )
+    assert print_obj(listener.get_event_by_type("content.done"), monkeypatch) == snapshot(
+        """\
+ContentDoneEvent[NoneType](
+    content="I'm unable to provide real-time weather updates. To get the latest weather information for San Francisco, I
+recommend checking a reliable weather website or using a weather app.",
+    parsed=None,
+    type='content.done'
+)
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    class Location(BaseModel):
+        city: str
+        temperature: float
+        units: Literal["c", "f"]
+
+    done_snapshots: list[ParsedChatCompletionSnapshot] = []
+
+    def on_event(stream: ChatCompletionStream[Location], event: ChatCompletionStreamEvent[Location]) -> None:
+        if event.type == "content.done":
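+            # deep copy so the captured snapshot can't be mutated by later chunks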
+            done_snapshots.append(model_copy(stream.current_completion_snapshot, deep=True))
+
+    listener = _make_stream_snapshot_request(
+        lambda c: c.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in SF?",
+                },
+            ],
+            response_format=Location,
+        ),
+        content_snapshot=snapshot(external("ea9a417d533b*.bin")),
+        mock_client=client,
+        respx_mock=respx_mock,
+        on_event=on_event,
+    )
+
+    assert len(done_snapshots) == 1
+    assert isinstance(done_snapshots[0].choices[0].message.parsed, Location)
+
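+    # the last `content.delta` event should carry the fully accumulated partial parse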
+    for event in reversed(listener.events):
+        if event.type == "content.delta":
+            data = cast(Any, event.parsed)
+            assert isinstance(data["city"], str), data
+            assert isinstance(data["temperature"], (int, float)), data
+            assert isinstance(data["units"], str), data
+            break
+    else:
+        rich.print(listener.events)
+        raise AssertionError("Did not find a `content.delta` event")
+
+    assert print_obj(listener.stream.get_final_completion(), monkeypatch) == snapshot(
+        """\
+ParsedChatCompletion[Location](
+    choices=[
+        ParsedChoice[Location](
+            finish_reason='stop',
+            index=0,
+            logprobs=None,
+            message=ParsedChatCompletionMessage[Location](
+                content='{"city":"San Francisco","temperature":63,"units":"f"}',
+                function_call=None,
+                parsed=Location(city='San Francisco', temperature=63.0, units='f'),
+                refusal=None,
+                role='assistant',
+                tool_calls=[]
+            )
+        )
+    ],
+    created=1722934250,
+    id='chatcmpl-9tAC2Fr44W8e4GakwKuKSSsFPhISv',
+    model='gpt-4o-so',
+    object='chat.completion',
+    service_tier=None,
+    system_fingerprint='fp_e1a05a1dce',
+    usage=CompletionUsage(completion_tokens=14, prompt_tokens=17, total_tokens=31)
+)
+"""
+    )
+    assert print_obj(listener.get_event_by_type("content.done"), monkeypatch) == snapshot(
+        """\
+ContentDoneEvent[Location](
+    content='{"city":"San Francisco","temperature":63,"units":"f"}',
+    parsed=Location(city='San Francisco', temperature=63.0, units='f'),
+    type='content.done'
+)
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model_multiple_choices(
+    client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
+) -> None:
+    class Location(BaseModel):
+        city: str
+        temperature: float
+        units: Literal["c", "f"]
+
+    listener = _make_stream_snapshot_request(
+        lambda c: c.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in SF?",
+                },
+            ],
+            n=3,
+            response_format=Location,
+        ),
+        content_snapshot=snapshot(external("1437bd06a9d5*.bin")),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert [e.type for e in listener.events] == snapshot(
+        [
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.delta",
+            "chunk",
+            "content.done",
+            "chunk",
+            "content.done",
+            "chunk",
+            "content.done",
+            "chunk",
+        ]
+    )
+    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[Location](
+        finish_reason='stop',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[Location](
+            content='{"city":"San Francisco","temperature":64,"units":"f"}',
+            function_call=None,
+            parsed=Location(city='San Francisco', temperature=64.0, units='f'),
+            refusal=None,
+            role='assistant',
+            tool_calls=[]
+        )
+    ),
+    ParsedChoice[Location](
+        finish_reason='stop',
+        index=1,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[Location](
+            content='{"city":"San Francisco","temperature":68,"units":"f"}',
+            function_call=None,
+            parsed=Location(city='San Francisco', temperature=68.0, units='f'),
+            refusal=None,
+            role='assistant',
+            tool_calls=[]
+        )
+    ),
+    ParsedChoice[Location](
+        finish_reason='stop',
+        index=2,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[Location](
+            content='{"city":"San Francisco","temperature":64,"units":"f"}',
+            function_call=None,
+            parsed=Location(city='San Francisco', temperature=64.0, units='f'),
+            refusal=None,
+            role='assistant',
+            tool_calls=[]
+        )
+    )
+]
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_max_tokens_reached(client: OpenAI, respx_mock: MockRouter) -> None:
+    class Location(BaseModel):
+        city: str
+        temperature: float
+        units: Literal["c", "f"]
+
+    with pytest.raises(openai.LengthFinishReasonError):
+        _make_stream_snapshot_request(
+            lambda c: c.beta.chat.completions.stream(
+                model="gpt-4o-2024-08-06",
+                messages=[
+                    {
+                        "role": "user",
+                        "content": "What's the weather like in SF?",
+                    },
+                ],
+                max_tokens=1,
+                response_format=Location,
+            ),
+            content_snapshot=snapshot(external("7ae6c1a2631b*.bin")),
+            mock_client=client,
+            respx_mock=respx_mock,
+        )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_model_refusal(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    class Location(BaseModel):
+        city: str
+        temperature: float
+        units: Literal["c", "f"]
+
+    listener = _make_stream_snapshot_request(
+        lambda c: c.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "How do I make anthrax?",
+                },
+            ],
+            response_format=Location,
+        ),
+        content_snapshot=snapshot(external("d79326933c15*.bin")),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(listener.get_event_by_type("refusal.done"), monkeypatch) == snapshot("""\
+RefusalDoneEvent(refusal="I'm very sorry, but I can't assist with that request.", type='refusal.done')
+""")
+
+    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[Location](
+        finish_reason='stop',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[Location](
+            content=None,
+            function_call=None,
+            parsed=None,
+            refusal="I'm very sorry, but I can't assist with that request.",
+            role='assistant',
+            tool_calls=[]
+        )
+    )
+]
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_content_logprobs_events(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    listener = _make_stream_snapshot_request(
+        lambda c: c.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "Say foo",
+                },
+            ],
+            logprobs=True,
+        ),
+        content_snapshot=snapshot(external("70c7df71ce72*.bin")),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj([e for e in listener.events if e.type.startswith("logprobs")], monkeypatch) == snapshot("""\
+[
+    LogprobsContentDeltaEvent(
+        content=[ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.006764991, token='Foo', top_logprobs=[])],
+        snapshot=[
+            ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.006764991, token='Foo', top_logprobs=[])
+        ],
+        type='logprobs.content.delta'
+    ),
+    LogprobsContentDeltaEvent(
+        content=[ChatCompletionTokenLogprob(bytes=[33], logprob=-0.31380808, token='!', top_logprobs=[])],
+        snapshot=[
+            ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.006764991, token='Foo', top_logprobs=[]),
+            ChatCompletionTokenLogprob(bytes=[33], logprob=-0.31380808, token='!', top_logprobs=[])
+        ],
+        type='logprobs.content.delta'
+    ),
+    LogprobsContentDoneEvent(
+        content=[
+            ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.006764991, token='Foo', top_logprobs=[]),
+            ChatCompletionTokenLogprob(bytes=[33], logprob=-0.31380808, token='!', top_logprobs=[])
+        ],
+        type='logprobs.content.done'
+    )
+]
+""")
+
+    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot("""\
+[
+    ParsedChoice[NoneType](
+        finish_reason='stop',
+        index=0,
+        logprobs=ChoiceLogprobs(
+            content=[
+                ChatCompletionTokenLogprob(bytes=[70, 111, 111], logprob=-0.006764991, token='Foo', top_logprobs=[]),
+                ChatCompletionTokenLogprob(bytes=[33], logprob=-0.31380808, token='!', top_logprobs=[])
+            ],
+            refusal=None
+        ),
+        message=ParsedChatCompletionMessage[NoneType](
+            content='Foo!',
+            function_call=None,
+            parsed=None,
+            refusal=None,
+            role='assistant',
+            tool_calls=[]
+        )
+    )
+]
+""")
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_refusal_logprobs_events(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    class Location(BaseModel):
+        city: str
+        temperature: float
+        units: Literal["c", "f"]
+
+    listener = _make_stream_snapshot_request(
+        lambda c: c.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "How do I make anthrax?",
+                },
+            ],
+            logprobs=True,
+            response_format=Location,
+        ),
+        content_snapshot=snapshot(external("cb77dc69b6c8*.bin")),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj([e.type for e in listener.events if e.type.startswith("logprobs")], monkeypatch) == snapshot("""\
+[
+    'logprobs.refusal.delta',
+    'logprobs.refusal.delta',
+    'logprobs.refusal.delta',
+    'logprobs.refusal.delta',
+    'logprobs.refusal.delta',
+    'logprobs.refusal.delta',
+    'logprobs.refusal.delta',
+    'logprobs.refusal.delta',
+    'logprobs.refusal.delta',
+    'logprobs.refusal.delta',
+    'logprobs.refusal.delta',
+    'logprobs.refusal.done'
+]
+""")
+
+    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot("""\
+[
+    ParsedChoice[Location](
+        finish_reason='stop',
+        index=0,
+        logprobs=ChoiceLogprobs(
+            content=None,
+            refusal=[
+                ChatCompletionTokenLogprob(bytes=[73, 39, 109], logprob=-0.0010472201, token="I'm", top_logprobs=[]),
+                ChatCompletionTokenLogprob(
+                    bytes=[32, 118, 101, 114, 121],
+                    logprob=-0.7292482,
+                    token=' very',
+                    top_logprobs=[]
+                ),
+                ChatCompletionTokenLogprob(
+                    bytes=[32, 115, 111, 114, 114, 121],
+                    logprob=-5.080963e-06,
+                    token=' sorry',
+                    top_logprobs=[]
+                ),
+                ChatCompletionTokenLogprob(bytes=[44], logprob=-4.048445e-05, token=',', top_logprobs=[]),
+                ChatCompletionTokenLogprob(
+                    bytes=[32, 98, 117, 116],
+                    logprob=-0.038046427,
+                    token=' but',
+                    top_logprobs=[]
+                ),
+                ChatCompletionTokenLogprob(bytes=[32, 73], logprob=-0.0019351852, token=' I', top_logprobs=[]),
+                ChatCompletionTokenLogprob(
+                    bytes=[32, 99, 97, 110, 39, 116],
+                    logprob=-0.008995773,
+                    token=" can't",
+                    top_logprobs=[]
+                ),
+                ChatCompletionTokenLogprob(
+                    bytes=[32, 97, 115, 115, 105, 115, 116],
+                    logprob=-0.0033510819,
+                    token=' assist',
+                    top_logprobs=[]
+                ),
+                ChatCompletionTokenLogprob(
+                    bytes=[32, 119, 105, 116, 104],
+                    logprob=-0.0036033941,
+                    token=' with',
+                    top_logprobs=[]
+                ),
+                ChatCompletionTokenLogprob(
+                    bytes=[32, 116, 104, 97, 116],
+                    logprob=-0.0015974608,
+                    token=' that',
+                    top_logprobs=[]
+                ),
+                ChatCompletionTokenLogprob(bytes=[46], logprob=-0.6339823, token='.', top_logprobs=[])
+            ]
+        ),
+        message=ParsedChatCompletionMessage[Location](
+            content=None,
+            function_call=None,
+            parsed=None,
+            refusal="I'm very sorry, but I can't assist with that.",
+            role='assistant',
+            tool_calls=[]
+        )
+    )
+]
+""")
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_pydantic_tool(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    class GetWeatherArgs(BaseModel):
+        city: str
+        country: str
+        units: Literal["c", "f"] = "c"
+
+    listener = _make_stream_snapshot_request(
+        lambda c: c.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in Edinburgh?",
+                },
+            ],
+            tools=[
+                openai.pydantic_function_tool(GetWeatherArgs),
+            ],
+        ),
+        content_snapshot=snapshot(external("ae070a447e1d*.bin")),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(listener.stream.current_completion_snapshot.choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[object](
+        finish_reason='tool_calls',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[object](
+            content=None,
+            function_call=None,
+            parsed=None,
+            refusal=None,
+            role='assistant',
+            tool_calls=[
+                ParsedFunctionToolCall(
+                    function=ParsedFunction(
+                        arguments='{"city":"Edinburgh","country":"UK","units":"c"}',
+                        name='GetWeatherArgs',
+                        parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
+                    ),
+                    id='call_Vz6ZXciy6Y0PYfT4d9W7fYB4',
+                    index=0,
+                    type='function'
+                )
+            ]
+        )
+    )
+]
+"""
+    )
+
+    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[NoneType](
+        finish_reason='tool_calls',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[NoneType](
+            content=None,
+            function_call=None,
+            parsed=None,
+            refusal=None,
+            role='assistant',
+            tool_calls=[
+                ParsedFunctionToolCall(
+                    function=ParsedFunction(
+                        arguments='{"city":"Edinburgh","country":"UK","units":"c"}',
+                        name='GetWeatherArgs',
+                        parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
+                    ),
+                    id='call_Vz6ZXciy6Y0PYfT4d9W7fYB4',
+                    index=0,
+                    type='function'
+                )
+            ]
+        )
+    )
+]
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_multiple_pydantic_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    class GetWeatherArgs(BaseModel):
+        """Get the temperature for the given country/city combo"""
+
+        city: str
+        country: str
+        units: Literal["c", "f"] = "c"
+
+    class GetStockPrice(BaseModel):
+        ticker: str
+        exchange: str
+
+    listener = _make_stream_snapshot_request(
+        lambda c: c.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in Edinburgh?",
+                },
+                {
+                    "role": "user",
+                    "content": "What's the price of AAPL?",
+                },
+            ],
+            tools=[
+                openai.pydantic_function_tool(GetWeatherArgs),
+                openai.pydantic_function_tool(
+                    GetStockPrice, name="get_stock_price", description="Fetch the latest price for a given ticker"
+                ),
+            ],
+        ),
+        content_snapshot=snapshot(external("a346213bec7a*.bin")),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(listener.stream.current_completion_snapshot.choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[object](
+        finish_reason='tool_calls',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[object](
+            content=None,
+            function_call=None,
+            parsed=None,
+            refusal=None,
+            role='assistant',
+            tool_calls=[
+                ParsedFunctionToolCall(
+                    function=ParsedFunction(
+                        arguments='{"city": "Edinburgh", "country": "UK", "units": "c"}',
+                        name='GetWeatherArgs',
+                        parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
+                    ),
+                    id='call_g4Q1vRbE0CaHGOs5if8mHsBq',
+                    index=0,
+                    type='function'
+                ),
+                ParsedFunctionToolCall(
+                    function=ParsedFunction(
+                        arguments='{"ticker": "AAPL", "exchange": "NASDAQ"}',
+                        name='get_stock_price',
+                        parsed_arguments=GetStockPrice(exchange='NASDAQ', ticker='AAPL')
+                    ),
+                    id='call_gWj3HQxZEHnFvyJLEHIiJKBV',
+                    index=1,
+                    type='function'
+                )
+            ]
+        )
+    )
+]
+"""
+    )
+    completion = listener.stream.get_final_completion()
+    assert print_obj(completion.choices[0].message.tool_calls, monkeypatch) == snapshot(
+        """\
+[
+    ParsedFunctionToolCall(
+        function=ParsedFunction(
+            arguments='{"city": "Edinburgh", "country": "UK", "units": "c"}',
+            name='GetWeatherArgs',
+            parsed_arguments=GetWeatherArgs(city='Edinburgh', country='UK', units='c')
+        ),
+        id='call_g4Q1vRbE0CaHGOs5if8mHsBq',
+        index=0,
+        type='function'
+    ),
+    ParsedFunctionToolCall(
+        function=ParsedFunction(
+            arguments='{"ticker": "AAPL", "exchange": "NASDAQ"}',
+            name='get_stock_price',
+            parsed_arguments=GetStockPrice(exchange='NASDAQ', ticker='AAPL')
+        ),
+        id='call_gWj3HQxZEHnFvyJLEHIiJKBV',
+        index=1,
+        type='function'
+    )
+]
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    listener = _make_stream_snapshot_request(
+        lambda c: c.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in SF?",
+                },
+            ],
+            tools=[
+                {
+                    "type": "function",
+                    "function": {
+                        "name": "get_weather",
+                        "parameters": {
+                            "type": "object",
+                            "properties": {
+                                "city": {"type": "string"},
+                                "state": {"type": "string"},
+                            },
+                            "required": [
+                                "city",
+                                "state",
+                            ],
+                            "additionalProperties": False,
+                        },
+                        "strict": True,
+                    },
+                }
+            ],
+        ),
+        content_snapshot=snapshot(external("a7097cae6a1f*.bin")),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(listener.stream.current_completion_snapshot.choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[object](
+        finish_reason='tool_calls',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[object](
+            content=None,
+            function_call=None,
+            parsed=None,
+            refusal=None,
+            role='assistant',
+            tool_calls=[
+                ParsedFunctionToolCall(
+                    function=ParsedFunction(
+                        arguments='{"city":"San Francisco","state":"CA"}',
+                        name='get_weather',
+                        parsed_arguments={'city': 'San Francisco', 'state': 'CA'}
+                    ),
+                    id='call_rQe3kzGnTr2epjx8HREg3F2a',
+                    index=0,
+                    type='function'
+                )
+            ]
+        )
+    )
+]
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_non_pydantic_response_format(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None:
+    listener = _make_stream_snapshot_request(
+        lambda c: c.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "What's the weather like in SF? Give me any JSON back",
+                },
+            ],
+            response_format={"type": "json_object"},
+        ),
+        content_snapshot=snapshot(external("3e0df46f250d*.bin")),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[NoneType](
+        finish_reason='stop',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[NoneType](
+            content='{\\n  "location": "San Francisco, CA",\\n  "temperature": "N/A",\\n  "conditions": "N/A",\\n  
+"humidity": "N/A",\\n  "wind_speed": "N/A",\\n  "timestamp": "N/A",\\n  "note": "Real-time weather data is not available. 
+Please check a reliable weather service for the most up-to-date information on San Francisco\\'s weather conditions."}',
+            function_call=None,
+            parsed=None,
+            refusal=None,
+            role='assistant',
+            tool_calls=[]
+        )
+    )
+]
+"""
+    )
+
+
+@pytest.mark.respx(base_url=base_url)
+def test_allows_non_strict_tools_but_no_parsing(
+    client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch
+) -> None:
+    listener = _make_stream_snapshot_request(
+        lambda c: c.beta.chat.completions.stream(
+            model="gpt-4o-2024-08-06",
+            messages=[{"role": "user", "content": "what's the weather in NYC?"}],
+            tools=[
+                {
+                    "type": "function",
+                    "function": {
+                        "name": "get_weather",
+                        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
+                    },
+                }
+            ],
+        ),
+        content_snapshot=snapshot(external("fb75060ede89*.bin")),
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert print_obj(listener.get_event_by_type("tool_calls.function.arguments.done"), monkeypatch) == snapshot("""\
+FunctionToolCallArgumentsDoneEvent(
+    arguments='{"city":"New York City"}',
+    index=0,
+    name='get_weather',
+    parsed_arguments=None,
+    type='tool_calls.function.arguments.done'
+)
+""")
+
+    assert print_obj(listener.stream.get_final_completion().choices, monkeypatch) == snapshot(
+        """\
+[
+    ParsedChoice[NoneType](
+        finish_reason='stop',
+        index=0,
+        logprobs=None,
+        message=ParsedChatCompletionMessage[NoneType](
+            content=None,
+            function_call=None,
+            parsed=None,
+            refusal=None,
+            role='assistant',
+            tool_calls=[
+                ParsedFunctionToolCall(
+                    function=ParsedFunction(
+                        arguments='{"city":"New York City"}',
+                        name='get_weather',
+                        parsed_arguments=None
+                    ),
+                    id='call_9rqjEc1DQRADTYGVV45LbZwL',
+                    index=0,
+                    type='function'
+                )
+            ]
+        )
+    )
+]
+"""
+    )
+
+
+@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
+def test_stream_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
+    checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
+
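+    # the stream() helper should accept the same parameters as create(), apart from the ones it manages itself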
+    assert_signatures_in_sync(
+        checking_client.chat.completions.create,
+        checking_client.beta.chat.completions.stream,
+        exclude_params={"response_format", "stream"},
+    )
+
+
+class StreamListener(Generic[ResponseFormatT]):
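+    """Iterates a ChatCompletionStream while recording every yielded event so tests can assert on them afterwards."""
+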
+    def __init__(self, stream: ChatCompletionStream[ResponseFormatT]) -> None:
+        self.stream = stream
+        self.events: list[ChatCompletionStreamEvent[ResponseFormatT]] = []
+
+    def __iter__(self) -> Iterator[ChatCompletionStreamEvent[ResponseFormatT]]:
+        for event in self.stream:
+            self.events.append(event)
+            yield event
+
+    @overload
+    def get_event_by_type(self, event_type: Literal["content.done"]) -> ContentDoneEvent[ResponseFormatT] | None: ...
+
+    @overload
+    def get_event_by_type(self, event_type: str) -> ChatCompletionStreamEvent[ResponseFormatT] | None: ...
+
+    def get_event_by_type(self, event_type: str) -> ChatCompletionStreamEvent[ResponseFormatT] | None:
+        return next((e for e in self.events if e.type == event_type), None)
+
+
+def _make_stream_snapshot_request(
+    func: Callable[[OpenAI], ChatCompletionStreamManager[ResponseFormatT]],
+    *,
+    content_snapshot: Any,
+    respx_mock: MockRouter,
+    mock_client: OpenAI,
+    on_event: Callable[[ChatCompletionStream[ResponseFormatT], ChatCompletionStreamEvent[ResponseFormatT]], Any]
+    | None = None,
+) -> StreamListener[ResponseFormatT]:
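+    # With OPENAI_LIVE=1 the request is sent to the real API and its body is checked against the
+    # external content snapshot; otherwise the recorded snapshot is replayed through respx as a
+    # server-sent-events response.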
+    live = os.environ.get("OPENAI_LIVE") == "1"
+    if live:
+
+        def _on_response(response: httpx.Response) -> None:
+            # compare the live response body against the external content snapshot
+            # (inline-snapshot updates it when run in snapshot-fixing mode)
+            assert outsource(response.read()) == content_snapshot
+
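+        # stop the respx router so the request reaches the real API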
+        respx_mock.stop()
+
+        client = OpenAI(
+            http_client=httpx.Client(
+                event_hooks={
+                    "response": [_on_response],
+                }
+            )
+        )
+    else:
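+        # replay the recorded SSE bytes through the mocked /chat/completions endpoint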
+        respx_mock.post("/chat/completions").mock(
+            return_value=httpx.Response(
+                200,
+                content=content_snapshot._old_value._load_value(),
+                headers={"content-type": "text/event-stream"},
+            )
+        )
+
+        client = mock_client
+
+    with func(client) as stream:
+        listener = StreamListener(stream)
+
+        for event in listener:
+            if on_event:
+                on_event(stream, event)
+
+    if live:
+        client.close()
+
+    return listener
diff --git a/tests/lib/schema_types/query.py b/tests/lib/schema_types/query.py
new file mode 100644
index 0000000000..d2284424f0
--- /dev/null
+++ b/tests/lib/schema_types/query.py
@@ -0,0 +1,51 @@
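+"""Pydantic models describing a simple SQL query, used to exercise ``openai.pydantic_function_tool`` in the tests."""
+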
+from enum import Enum
+from typing import List, Union
+
+from pydantic import BaseModel
+
+
+class Table(str, Enum):
+    orders = "orders"
+    customers = "customers"
+    products = "products"
+
+
+class Column(str, Enum):
+    id = "id"
+    status = "status"
+    expected_delivery_date = "expected_delivery_date"
+    delivered_at = "delivered_at"
+    shipped_at = "shipped_at"
+    ordered_at = "ordered_at"
+    canceled_at = "canceled_at"
+
+
+class Operator(str, Enum):
+    eq = "="
+    gt = ">"
+    lt = "<"
+    le = "<="
+    ge = ">="
+    ne = "!="
+
+
+class OrderBy(str, Enum):
+    asc = "asc"
+    desc = "desc"
+
+
+class DynamicValue(BaseModel):
+    column_name: str
+
+
+class Condition(BaseModel):
+    column: str
+    operator: Operator
+    value: Union[str, int, DynamicValue]
+
+
+class Query(BaseModel):
+    table_name: Table
+    columns: List[Column]
+    conditions: List[Condition]
+    order_by: OrderBy
diff --git a/tests/lib/test_pydantic.py b/tests/lib/test_pydantic.py
new file mode 100644
index 0000000000..dc09596da2
--- /dev/null
+++ b/tests/lib/test_pydantic.py
@@ -0,0 +1,161 @@
+from __future__ import annotations
+
+from inline_snapshot import snapshot
+
+import openai
+from openai._compat import PYDANTIC_V2
+
+from .schema_types.query import Query
+
+
+def test_most_types() -> None:
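+    # pydantic v2 emits a "$defs"-based JSON schema while v1 uses "definitions", so each is snapshotted separately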
+    if PYDANTIC_V2:
+        assert openai.pydantic_function_tool(Query)["function"] == snapshot(
+            {
+                "name": "Query",
+                "strict": True,
+                "parameters": {
+                    "$defs": {
+                        "Column": {
+                            "enum": [
+                                "id",
+                                "status",
+                                "expected_delivery_date",
+                                "delivered_at",
+                                "shipped_at",
+                                "ordered_at",
+                                "canceled_at",
+                            ],
+                            "title": "Column",
+                            "type": "string",
+                        },
+                        "Condition": {
+                            "properties": {
+                                "column": {"title": "Column", "type": "string"},
+                                "operator": {"$ref": "#/$defs/Operator"},
+                                "value": {
+                                    "anyOf": [
+                                        {"type": "string"},
+                                        {"type": "integer"},
+                                        {"$ref": "#/$defs/DynamicValue"},
+                                    ],
+                                    "title": "Value",
+                                },
+                            },
+                            "required": ["column", "operator", "value"],
+                            "title": "Condition",
+                            "type": "object",
+                            "additionalProperties": False,
+                        },
+                        "DynamicValue": {
+                            "properties": {"column_name": {"title": "Column Name", "type": "string"}},
+                            "required": ["column_name"],
+                            "title": "DynamicValue",
+                            "type": "object",
+                            "additionalProperties": False,
+                        },
+                        "Operator": {"enum": ["=", ">", "<", "<=", ">=", "!="], "title": "Operator", "type": "string"},
+                        "OrderBy": {"enum": ["asc", "desc"], "title": "OrderBy", "type": "string"},
+                        "Table": {"enum": ["orders", "customers", "products"], "title": "Table", "type": "string"},
+                    },
+                    "properties": {
+                        "table_name": {"$ref": "#/$defs/Table"},
+                        "columns": {
+                            "items": {"$ref": "#/$defs/Column"},
+                            "title": "Columns",
+                            "type": "array",
+                        },
+                        "conditions": {
+                            "items": {"$ref": "#/$defs/Condition"},
+                            "title": "Conditions",
+                            "type": "array",
+                        },
+                        "order_by": {"$ref": "#/$defs/OrderBy"},
+                    },
+                    "required": ["table_name", "columns", "conditions", "order_by"],
+                    "title": "Query",
+                    "type": "object",
+                    "additionalProperties": False,
+                },
+            }
+        )
+    else:
+        assert openai.pydantic_function_tool(Query)["function"] == snapshot(
+            {
+                "name": "Query",
+                "strict": True,
+                "parameters": {
+                    "title": "Query",
+                    "type": "object",
+                    "properties": {
+                        "table_name": {"$ref": "#/definitions/Table"},
+                        "columns": {"type": "array", "items": {"$ref": "#/definitions/Column"}},
+                        "conditions": {
+                            "title": "Conditions",
+                            "type": "array",
+                            "items": {"$ref": "#/definitions/Condition"},
+                        },
+                        "order_by": {"$ref": "#/definitions/OrderBy"},
+                    },
+                    "required": ["table_name", "columns", "conditions", "order_by"],
+                    "definitions": {
+                        "Table": {
+                            "title": "Table",
+                            "description": "An enumeration.",
+                            "enum": ["orders", "customers", "products"],
+                            "type": "string",
+                        },
+                        "Column": {
+                            "title": "Column",
+                            "description": "An enumeration.",
+                            "enum": [
+                                "id",
+                                "status",
+                                "expected_delivery_date",
+                                "delivered_at",
+                                "shipped_at",
+                                "ordered_at",
+                                "canceled_at",
+                            ],
+                            "type": "string",
+                        },
+                        "Operator": {
+                            "title": "Operator",
+                            "description": "An enumeration.",
+                            "enum": ["=", ">", "<", "<=", ">=", "!="],
+                            "type": "string",
+                        },
+                        "DynamicValue": {
+                            "title": "DynamicValue",
+                            "type": "object",
+                            "properties": {"column_name": {"title": "Column Name", "type": "string"}},
+                            "required": ["column_name"],
+                        },
+                        "Condition": {
+                            "title": "Condition",
+                            "type": "object",
+                            "properties": {
+                                "column": {"title": "Column", "type": "string"},
+                                "operator": {"$ref": "#/definitions/Operator"},
+                                "value": {
+                                    "title": "Value",
+                                    "anyOf": [
+                                        {"type": "string"},
+                                        {"type": "integer"},
+                                        {"$ref": "#/definitions/DynamicValue"},
+                                    ],
+                                },
+                            },
+                            "required": ["column", "operator", "value"],
+                        },
+                        "OrderBy": {
+                            "title": "OrderBy",
+                            "description": "An enumeration.",
+                            "enum": ["asc", "desc"],
+                            "type": "string",
+                        },
+                    },
+                    "additionalProperties": False,
+                },
+            }
+        )
diff --git a/tests/test_client.py b/tests/test_client.py
index 2402ffa82f..054ae0ff4e 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -780,11 +780,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
         response = client.chat.completions.with_raw_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
 
         assert response.retries_taken == failures_before_success
@@ -811,11 +811,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
         with client.chat.completions.with_streaming_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         ) as response:
             assert response.retries_taken == failures_before_success
 
@@ -1574,11 +1574,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
         response = await client.chat.completions.with_raw_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         )
 
         assert response.retries_taken == failures_before_success
@@ -1606,10 +1606,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
         async with client.chat.completions.with_streaming_response.create(
             messages=[
                 {
-                    "content": "content",
+                    "content": "string",
                     "role": "system",
                 }
             ],
-            model="gpt-4-turbo",
+            model="gpt-4o",
         ) as response:
             assert response.retries_taken == failures_before_success
diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py
index 8cf65ce94e..86a2adb1a2 100644
--- a/tests/test_deepcopy.py
+++ b/tests/test_deepcopy.py
@@ -41,8 +41,7 @@ def test_nested_list() -> None:
     assert_different_identities(obj1[1], obj2[1])
 
 
-class MyObject:
-    ...
+class MyObject: ...
 
 
 def test_ignores_other_types() -> None:
diff --git a/tests/test_legacy_response.py b/tests/test_legacy_response.py
index 45025f81d0..3659ee12c1 100644
--- a/tests/test_legacy_response.py
+++ b/tests/test_legacy_response.py
@@ -12,8 +12,7 @@
 from openai._legacy_response import LegacyAPIResponse
 
 
-class PydanticModel(pydantic.BaseModel):
-    ...
+class PydanticModel(pydantic.BaseModel): ...
 
 
 def test_response_parse_mismatched_basemodel(client: OpenAI) -> None:
diff --git a/tests/test_response.py b/tests/test_response.py
index af153b67c4..6ea1be1a1a 100644
--- a/tests/test_response.py
+++ b/tests/test_response.py
@@ -19,16 +19,13 @@
 from openai._base_client import FinalRequestOptions
 
 
-class ConcreteBaseAPIResponse(APIResponse[bytes]):
-    ...
+class ConcreteBaseAPIResponse(APIResponse[bytes]): ...
 
 
-class ConcreteAPIResponse(APIResponse[List[str]]):
-    ...
+class ConcreteAPIResponse(APIResponse[List[str]]): ...
 
 
-class ConcreteAsyncAPIResponse(APIResponse[httpx.Response]):
-    ...
+class ConcreteAsyncAPIResponse(APIResponse[httpx.Response]): ...
 
 
 def test_extract_response_type_direct_classes() -> None:
@@ -56,8 +53,7 @@ def test_extract_response_type_binary_response() -> None:
     assert extract_response_type(AsyncBinaryAPIResponse) == bytes
 
 
-class PydanticModel(pydantic.BaseModel):
-    ...
+class PydanticModel(pydantic.BaseModel): ...
 
 
 def test_response_parse_mismatched_basemodel(client: OpenAI) -> None:
diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py
index 690960802a..535935b9e1 100644
--- a/tests/test_utils/test_typing.py
+++ b/tests/test_utils/test_typing.py
@@ -9,24 +9,19 @@
 _T3 = TypeVar("_T3")
 
 
-class BaseGeneric(Generic[_T]):
-    ...
+class BaseGeneric(Generic[_T]): ...
 
 
-class SubclassGeneric(BaseGeneric[_T]):
-    ...
+class SubclassGeneric(BaseGeneric[_T]): ...
 
 
-class BaseGenericMultipleTypeArgs(Generic[_T, _T2, _T3]):
-    ...
+class BaseGenericMultipleTypeArgs(Generic[_T, _T2, _T3]): ...
 
 
-class SubclassGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T, _T2, _T3]):
-    ...
+class SubclassGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T, _T2, _T3]): ...
 
 
-class SubclassDifferentOrderGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T2, _T, _T3]):
-    ...
+class SubclassDifferentOrderGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T2, _T, _T3]): ...
 
 
 def test_extract_type_var() -> None: