diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 73f1b9f237..6eb007253c 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -1,27 +1,9 @@
-# syntax=docker/dockerfile:1
-FROM debian:bookworm-slim
+ARG VARIANT="3.9"
+FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT}
 
-RUN apt-get update && apt-get install -y \
-      libxkbcommon0 \
-      ca-certificates \
-      make \
-      curl \
-      git \
-      unzip \
-      libc++1 \
-      vim \
-      termcap \
-    && apt-get clean autoclean
+USER vscode
 
 RUN curl -sSf https://rye-up.com/get | RYE_VERSION="0.15.2" RYE_INSTALL_OPTION="--yes" bash
-ENV PATH=/root/.rye/shims:$PATH
+ENV PATH=/home/vscode/.rye/shims:$PATH
 
-WORKDIR /workspace
-
-COPY README.md .python-version pyproject.toml requirements.lock requirements-dev.lock /workspace/
-
-RUN rye sync --all-features
-
-COPY . /workspace
-
-CMD ["rye", "shell"]
+RUN echo "[[ -d .venv ]] && source .venv/bin/activate" >> /home/vscode/.bashrc
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index d55fc4d671..b9da964dc1 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -3,7 +3,26 @@
 {
     "name": "Debian",
     "build": {
-        "dockerfile": "Dockerfile"
+        "dockerfile": "Dockerfile",
+        "context": ".."
+    },
+
+    "postStartCommand": "rye sync --all-features",
+
+    "customizations": {
+        "vscode": {
+            "extensions": [
+                "ms-python.python"
+            ],
+            "settings": {
+                "terminal.integrated.shell.linux": "/bin/bash",
+                "python.pythonPath": ".venv/bin/python",
+                "python.typeChecking": "basic",
+                "terminal.integrated.env.linux": {
+                    "PATH": "/home/vscode/.rye/shims:${env:PATH}"
+                }
+            }
+        }
     }
 
     // Features to add to the dev container. More info: https://containers.dev/features.
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 9c6a481f5b..d0ab6645f5 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.1.2"
+  ".": "1.2.0"
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8c97964977..1b58f41340 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,30 @@
 # Changelog
 
+## 1.2.0 (2023-11-08)
+
+Full Changelog: [v1.1.2...v1.2.0](https://github.com/openai/openai-python/compare/v1.1.2...v1.2.0)
+
+### Features
+
+* **api:** unify function types ([#741](https://github.com/openai/openai-python/issues/741)) ([ed16c4d](https://github.com/openai/openai-python/commit/ed16c4d2fec6cf4e33235d82b05ed9a777752204))
+* **client:** support passing chunk size for binary responses ([#747](https://github.com/openai/openai-python/issues/747)) ([c0c89b7](https://github.com/openai/openai-python/commit/c0c89b77a69ef098900e3a194894efcf72085d36))
+
+
+### Bug Fixes
+
+* **api:** update embedding response object type ([#739](https://github.com/openai/openai-python/issues/739)) ([29182c4](https://github.com/openai/openai-python/commit/29182c4818e2c56f46e961dba33e31dc30c25519))
+* **client:** show a helpful error message if the v0 API is used ([#743](https://github.com/openai/openai-python/issues/743)) ([920567c](https://github.com/openai/openai-python/commit/920567cb04df48a7f6cd2a3402a0b1f172c6290e))
+
+
+### Chores
+
+* **internal:** improve github devcontainer setup ([#737](https://github.com/openai/openai-python/issues/737)) ([0ac1abb](https://github.com/openai/openai-python/commit/0ac1abb07ec687a4f7b1150be10054dbd6e7cfbc))
+
+
+### Refactors
+
+* **api:** rename FunctionObject to FunctionDefinition ([#746](https://github.com/openai/openai-python/issues/746)) ([1afd138](https://github.com/openai/openai-python/commit/1afd13856c0e586ecbde8b24fe4f4bad9beeefdf))
+
 ## 1.1.2 (2023-11-08)
 
 Full Changelog: [v1.1.1...v1.1.2](https://github.com/openai/openai-python/compare/v1.1.1...v1.1.2)
diff --git a/api.md b/api.md
index 95e9922129..e0237803de 100644
--- a/api.md
+++ b/api.md
@@ -1,3 +1,9 @@
+# Shared Types
+
+```python
+from openai.types import FunctionDefinition, FunctionParameters
+```
+
 # Completions
 
 Types:
diff --git a/pyproject.toml b/pyproject.toml
index 0861b1278b..1900794dfc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.1.2"
+version = "1.2.0"
 description = "Client library for the openai API"
 readme = "README.md"
 license = "Apache-2.0"
diff --git a/src/openai/__init__.py b/src/openai/__init__.py
index da1157a767..d92dfe969a 100644
--- a/src/openai/__init__.py
+++ b/src/openai/__init__.py
@@ -74,6 +74,7 @@
 from .version import VERSION as VERSION
 from .lib.azure import AzureOpenAI as AzureOpenAI
 from .lib.azure import AsyncAzureOpenAI as AsyncAzureOpenAI
+from .lib._old_api import *
 
 _setup_logging()
diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py
index e37759cdf8..b2fe242634 100644
--- a/src/openai/_base_client.py
+++ b/src/openai/_base_client.py
@@ -1727,9 +1727,14 @@ def iter_raw(self, chunk_size: Optional[int] = None) -> Iterator[bytes]:
         return self.response.iter_raw(chunk_size)
 
     @override
-    def stream_to_file(self, file: str | os.PathLike[str]) -> None:
+    def stream_to_file(
+        self,
+        file: str | os.PathLike[str],
+        *,
+        chunk_size: int | None = None,
+    ) -> None:
         with open(file, mode="wb") as f:
-            for data in self.response.iter_bytes():
+            for data in self.response.iter_bytes(chunk_size):
                 f.write(data)
 
     @override
@@ -1757,10 +1762,15 @@ async def aiter_raw(self, chunk_size: Optional[int] = None) -> AsyncIterator[byt
         return self.response.aiter_raw(chunk_size)
 
     @override
-    async def astream_to_file(self, file: str | os.PathLike[str]) -> None:
+    async def astream_to_file(
+        self,
+        file: str | os.PathLike[str],
+        *,
+        chunk_size: int | None = None,
+    ) -> None:
         path = anyio.Path(file)
         async with await path.open(mode="wb") as f:
-            async for data in self.response.aiter_bytes():
+            async for data in self.response.aiter_bytes(chunk_size):
                 await f.write(data)
 
     @override
diff --git a/src/openai/_types.py b/src/openai/_types.py
index dabd15866f..0d05be9493 100644
--- a/src/openai/_types.py
+++ b/src/openai/_types.py
@@ -123,7 +123,12 @@ def iter_raw(self, chunk_size: Optional[int] = None) -> Iterator[bytes]:
         pass
 
     @abstractmethod
-    def stream_to_file(self, file: str | PathLike[str]) -> None:
+    def stream_to_file(
+        self,
+        file: str | PathLike[str],
+        *,
+        chunk_size: int | None = None,
+    ) -> None:
         """
         Stream the output to the given file.
         """
@@ -172,7 +177,13 @@ async def aiter_raw(self, chunk_size: Optional[int] = None) -> AsyncIterator[byt
         """
         pass
 
-    async def astream_to_file(self, file: str | PathLike[str]) -> None:
+    @abstractmethod
+    async def astream_to_file(
+        self,
+        file: str | PathLike[str],
+        *,
+        chunk_size: int | None = None,
+    ) -> None:
         """
         Stream the output to the given file.
         """
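For context on the `chunk_size` plumbing above, a minimal sketch of how the new keyword can be used from calling code. The `audio.speech.create` call and the output filename are assumptions for illustration; any endpoint that returns the SDK's binary response type exposes the same `stream_to_file` method:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Assumption: audio.speech returns the binary response type whose
# stream_to_file() gained the chunk_size parameter in this release.
response = client.audio.speech.create(
    model="tts-1",
    voice="alloy",
    input="Hello from the 1.2.0 release!",
)

# chunk_size is forwarded to httpx's iter_bytes(), controlling how much
# data is buffered per write instead of using httpx's default chunking.
response.stream_to_file("speech.mp3", chunk_size=4096)
```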
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 848573b8a1..9d7e588fcf 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless.
 
 __title__ = "openai"
-__version__ = "1.1.2"  # x-release-please-version
+__version__ = "1.2.0"  # x-release-please-version
diff --git a/src/openai/lib/_old_api.py b/src/openai/lib/_old_api.py
new file mode 100644
index 0000000000..c4038fcfaf
--- /dev/null
+++ b/src/openai/lib/_old_api.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from typing_extensions import override
+
+from .._utils import LazyProxy
+from .._exceptions import OpenAIError
+
+INSTRUCTIONS = """
+
+You tried to access openai.{symbol}, but this is no longer supported in openai>=1.0.0 - see the README at https://github.com/openai/openai-python for the API.
+
+You can run `openai migrate` to automatically upgrade your codebase to use the 1.0.0 interface.
+
+Alternatively, you can pin your installation to the old version, e.g. `pip install openai==0.28`
+
+A detailed migration guide is available here: https://github.com/openai/openai-python/discussions/742
+"""
+
+
+class APIRemovedInV1(OpenAIError):
+    def __init__(self, *, symbol: str) -> None:
+        super().__init__(INSTRUCTIONS.format(symbol=symbol))
+
+
+class APIRemovedInV1Proxy(LazyProxy[None]):
+    def __init__(self, *, symbol: str) -> None:
+        super().__init__()
+        self._symbol = symbol
+
+    @override
+    def __load__(self) -> None:
+        raise APIRemovedInV1(symbol=self._symbol)
+
+
+SYMBOLS = [
+    "Edit",
+    "File",
+    "Audio",
+    "Image",
+    "Model",
+    "Engine",
+    "Customer",
+    "FineTune",
+    "Embedding",
+    "Completion",
+    "Deployment",
+    "Moderation",
+    "ErrorObject",
+    "FineTuningJob",
+    "ChatCompletion",
+]
+
+# we explicitly tell type checkers that nothing is exported
+# from this file so that when we re-export the old symbols
+# in `openai/__init__.py` they aren't added to the auto-complete
+# suggestions given by editors
+if TYPE_CHECKING:
+    __all__: list[str] = []
+else:
+    __all__ = SYMBOLS
+
+
+__locals = locals()
+for symbol in SYMBOLS:
+    __locals[symbol] = APIRemovedInV1Proxy(symbol=symbol)
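The proxy objects above make any attribute access on a removed symbol raise `APIRemovedInV1`, which subclasses `OpenAIError`. A minimal sketch of what v0-style code now sees; the `engine` and `prompt` values are illustrative and are never sent anywhere, since the error is raised at attribute access, before any request:

```python
import openai

try:
    # Looking up .create on the APIRemovedInV1Proxy triggers __load__,
    # which raises APIRemovedInV1 with the INSTRUCTIONS message.
    openai.Completion.create(engine="text-davinci-003", prompt="Hello")
except openai.OpenAIError as err:
    # The message explains the removal, points at `openai migrate`,
    # and links the migration guide in discussion #742.
    print(err)
```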
diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py
index 75e0d66d58..ff36424442 100644
--- a/src/openai/resources/chat/completions.py
+++ b/src/openai/resources/chat/completions.py
@@ -137,8 +137,18 @@ def create(
           [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and the appearance of a "stuck" request.
+              Also note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -304,8 +314,18 @@ def create(
           [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and the appearance of a "stuck" request.
+              Also note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -464,8 +484,18 @@ def create(
           [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and the appearance of a "stuck" request.
+              Also note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -704,8 +734,18 @@ async def create(
           [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and the appearance of a "stuck" request.
+              Also note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -871,8 +911,18 @@ async def create(
           [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and the appearance of a "stuck" request.
+              Also note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
@@ -1031,8 +1081,18 @@ async def create(
           [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
 
-          response_format: An object specifying the format that the model must output. Used to enable JSON
-              mode.
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+              message the model generates is valid JSON.
+
+              **Important:** when using JSON mode, you **must** also instruct the model to
+              produce JSON yourself via a system or user message. Without this, the model may
+              generate an unending stream of whitespace until the generation reaches the token
+              limit, resulting in increased latency and the appearance of a "stuck" request.
+              Also note that the message content may be partially cut off if
+              `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+              or the conversation exceeded the max context length.
 
           seed: This feature is in Beta. If specified, our system will make a best effort to
               sample deterministically, such that repeated requests with the same `seed` and
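A short usage sketch of the JSON mode behavior documented above; the model name is an assumption (JSON mode requires a model that supports it, such as `gpt-4-1106-preview` at the time of this release):

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4-1106-preview",  # assumption: a model with JSON mode support
    response_format={"type": "json_object"},
    messages=[
        # Per the docstring: you must also tell the model to emit JSON,
        # or it may stream whitespace until it hits the token limit.
        {"role": "system", "content": "You are a helpful assistant. Reply in JSON."},
        {"role": "user", "content": "Name three primary colors."},
    ],
)

print(completion.choices[0].message.content)   # valid JSON, unless cut off
print(completion.choices[0].finish_reason)     # "length" means truncated JSON
```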
diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py
index 8f21480d5e..1b4fca26ee 100644
--- a/src/openai/types/__init__.py
+++ b/src/openai/types/__init__.py
@@ -5,6 +5,8 @@
 from .edit import Edit as Edit
 from .image import Image as Image
 from .model import Model as Model
+from .shared import FunctionDefinition as FunctionDefinition
+from .shared import FunctionParameters as FunctionParameters
 from .embedding import Embedding as Embedding
 from .fine_tune import FineTune as FineTune
 from .completion import Completion as Completion
diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py
index e15282a69a..a21206765a 100644
--- a/src/openai/types/beta/assistant.py
+++ b/src/openai/types/beta/assistant.py
@@ -1,12 +1,13 @@
 # File generated from our OpenAPI spec by Stainless.
 
 import builtins
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal
 
+from ..shared import FunctionDefinition
 from ..._models import BaseModel
 
-__all__ = ["Assistant", "Tool", "ToolCodeInterpreter", "ToolRetrieval", "ToolFunction", "ToolFunctionFunction"]
+__all__ = ["Assistant", "Tool", "ToolCodeInterpreter", "ToolRetrieval", "ToolFunction"]
 
 
 class ToolCodeInterpreter(BaseModel):
@@ -19,36 +20,8 @@ class ToolRetrieval(BaseModel):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolFunctionFunction(BaseModel):
-    description: str
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: str
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Dict[str, builtins.object]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolFunction(BaseModel):
-    function: ToolFunctionFunction
-    """The function definition."""
+    function: FunctionDefinition
 
     type: Literal["function"]
     """The type of tool being defined: `function`"""
diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py
index 8272d5eb4d..539897a7ba 100644
--- a/src/openai/types/beta/assistant_create_params.py
+++ b/src/openai/types/beta/assistant_create_params.py
@@ -2,16 +2,17 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict
 
+from ...types import shared_params
+
 __all__ = [
     "AssistantCreateParams",
     "Tool",
     "ToolAssistantToolsCode",
     "ToolAssistantToolsRetrieval",
     "ToolAssistantToolsFunction",
-    "ToolAssistantToolsFunctionFunction",
 ]
 
 
@@ -71,36 +72,8 @@ class ToolAssistantToolsRetrieval(TypedDict, total=False):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolAssistantToolsFunctionFunction(TypedDict, total=False):
-    description: Required[str]
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: Required[str]
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Required[Dict[str, object]]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolAssistantToolsFunction(TypedDict, total=False):
-    function: Required[ToolAssistantToolsFunctionFunction]
-    """The function definition."""
+    function: Required[shared_params.FunctionDefinition]
 
     type: Required[Literal["function"]]
     """The type of tool being defined: `function`"""
diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py
index 3916833b77..a0efd96ecd 100644
--- a/src/openai/types/beta/assistant_update_params.py
+++ b/src/openai/types/beta/assistant_update_params.py
@@ -2,16 +2,17 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict
 
+from ...types import shared_params
+
 __all__ = [
     "AssistantUpdateParams",
     "Tool",
     "ToolAssistantToolsCode",
     "ToolAssistantToolsRetrieval",
     "ToolAssistantToolsFunction",
-    "ToolAssistantToolsFunctionFunction",
 ]
 
 
@@ -73,36 +74,8 @@ class ToolAssistantToolsRetrieval(TypedDict, total=False):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolAssistantToolsFunctionFunction(TypedDict, total=False):
-    description: Required[str]
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: Required[str]
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Required[Dict[str, object]]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolAssistantToolsFunction(TypedDict, total=False):
-    function: Required[ToolAssistantToolsFunctionFunction]
-    """The function definition."""
+    function: Required[shared_params.FunctionDefinition]
 
     type: Required[Literal["function"]]
     """The type of tool being defined: `function`"""
diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py
index d7391d4d62..9f58dcd875 100644
--- a/src/openai/types/beta/thread_create_and_run_params.py
+++ b/src/openai/types/beta/thread_create_and_run_params.py
@@ -2,9 +2,11 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict
 
+from ...types import shared_params
+
 __all__ = [
     "ThreadCreateAndRunParams",
     "Thread",
     "ThreadMessage",
@@ -13,7 +15,6 @@
     "ToolAssistantToolsCode",
     "ToolAssistantToolsRetrieval",
     "ToolAssistantToolsFunction",
-    "ToolAssistantToolsFunctionFunction",
 ]
 
 
@@ -110,36 +111,8 @@ class ToolAssistantToolsRetrieval(TypedDict, total=False):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolAssistantToolsFunctionFunction(TypedDict, total=False):
-    description: Required[str]
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: Required[str]
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Required[Dict[str, object]]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolAssistantToolsFunction(TypedDict, total=False):
-    function: Required[ToolAssistantToolsFunctionFunction]
-    """The function definition."""
+    function: Required[shared_params.FunctionDefinition]
 
     type: Required[Literal["function"]]
     """The type of tool being defined: `function`"""
diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py
index d30a32ec97..ffbba1e504 100644
--- a/src/openai/types/beta/threads/run.py
+++ b/src/openai/types/beta/threads/run.py
@@ -1,9 +1,10 @@
 # File generated from our OpenAPI spec by Stainless.
 
 import builtins
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal
 
+from ...shared import FunctionDefinition
 from ...._models import BaseModel
 from .required_action_function_tool_call import RequiredActionFunctionToolCall
 
@@ -16,7 +17,6 @@
     "ToolAssistantToolsCode",
     "ToolAssistantToolsRetrieval",
     "ToolAssistantToolsFunction",
-    "ToolAssistantToolsFunctionFunction",
 ]
 
 
@@ -51,36 +51,8 @@ class ToolAssistantToolsRetrieval(BaseModel):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolAssistantToolsFunctionFunction(BaseModel):
-    description: str
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: str
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Dict[str, builtins.object]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolAssistantToolsFunction(BaseModel):
-    function: ToolAssistantToolsFunctionFunction
-    """The function definition."""
+    function: FunctionDefinition
 
     type: Literal["function"]
     """The type of tool being defined: `function`"""
 
@@ -147,8 +119,8 @@ class Run(BaseModel):
     this run.
     """
 
-    object: Literal["assistant.run"]
-    """The object type, which is always `assistant.run`."""
+    object: Literal["thread.run"]
+    """The object type, which is always `thread.run`."""
 
     required_action: Optional[RequiredAction]
     """Details on the action required to continue the run.
diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py
index cf1bb9f05d..df92f4fd2c 100644
--- a/src/openai/types/beta/threads/run_create_params.py
+++ b/src/openai/types/beta/threads/run_create_params.py
@@ -2,16 +2,17 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict
 
+from ....types import shared_params
+
 __all__ = [
     "RunCreateParams",
     "Tool",
     "ToolAssistantToolsCode",
     "ToolAssistantToolsRetrieval",
     "ToolAssistantToolsFunction",
-    "ToolAssistantToolsFunctionFunction",
 ]
 
 
@@ -62,36 +63,8 @@ class ToolAssistantToolsRetrieval(TypedDict, total=False):
     """The type of tool being defined: `retrieval`"""
 
 
-class ToolAssistantToolsFunctionFunction(TypedDict, total=False):
-    description: Required[str]
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
-
-    name: Required[str]
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Required[Dict[str, object]]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-
 class ToolAssistantToolsFunction(TypedDict, total=False):
-    function: Required[ToolAssistantToolsFunctionFunction]
-    """The function definition."""
+    function: Required[shared_params.FunctionDefinition]
 
     type: Required[Literal["function"]]
     """The type of tool being defined: `function`"""
diff --git a/src/openai/types/beta/threads/runs/run_step.py b/src/openai/types/beta/threads/runs/run_step.py
index 17a567dc0e..536cf04ab1 100644
--- a/src/openai/types/beta/threads/runs/run_step.py
+++ b/src/openai/types/beta/threads/runs/run_step.py
@@ -65,8 +65,8 @@ class RunStep(BaseModel):
     a maximum of 512 characters long.
     """
 
-    object: Literal["assistant.run.step"]
-    """The object type, which is always `assistant.run.step`."""
+    object: Literal["thread.run.step"]
+    """The object type, which is always `thread.run.step`."""
 
     run_id: str
     """
@@ -76,8 +76,8 @@ class RunStep(BaseModel):
 
     status: Literal["in_progress", "cancelled", "failed", "completed", "expired"]
     """
-    The status of the run, which can be either `in_progress`, `cancelled`, `failed`,
-    `completed`, or `expired`.
+    The status of the run step, which can be either `in_progress`, `cancelled`,
+    `failed`, `completed`, or `expired`.
     """
 
     step_details: StepDetails
diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py
index 568f530280..6be046b01e 100644
--- a/src/openai/types/chat/chat_completion_chunk.py
+++ b/src/openai/types/chat/chat_completion_chunk.py
@@ -111,8 +111,8 @@ class ChatCompletionChunk(BaseModel):
     """The object type, which is always `chat.completion.chunk`."""
 
     system_fingerprint: Optional[str] = None
-    """This fingerprint represents the backend configuration that the model runs with.
-
+    """
+    This fingerprint represents the backend configuration that the model runs with.
     Can be used in conjunction with the `seed` request parameter to understand when
     backend changes have been made that might impact determinism.
     """
diff --git a/src/openai/types/chat/chat_completion_tool_param.py b/src/openai/types/chat/chat_completion_tool_param.py
index 4b7e6238c7..54c223955e 100644
--- a/src/openai/types/chat/chat_completion_tool_param.py
+++ b/src/openai/types/chat/chat_completion_tool_param.py
@@ -2,41 +2,15 @@
 
 from __future__ import annotations
 
-from typing import Dict
 from typing_extensions import Literal, Required, TypedDict
 
-__all__ = ["ChatCompletionToolParam", "Function"]
+from ...types import shared_params
 
-
-class Function(TypedDict, total=False):
-    name: Required[str]
-    """The name of the function to be called.
-
-    Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
-    of 64.
-    """
-
-    parameters: Required[Dict[str, object]]
-    """The parameters the functions accepts, described as a JSON Schema object.
-
-    See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
-    for examples, and the
-    [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-    documentation about the format.
-
-    To describe a function that accepts no parameters, provide the value
-    `{"type": "object", "properties": {}}`.
-    """
-
-    description: str
-    """
-    A description of what the function does, used by the model to choose when and
-    how to call the function.
-    """
+__all__ = ["ChatCompletionToolParam"]
 
 
 class ChatCompletionToolParam(TypedDict, total=False):
-    function: Required[Function]
+    function: Required[shared_params.FunctionDefinition]
 
     type: Required[Literal["function"]]
     """The type of the tool. Currently, only `function` is supported."""
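With chat tools now typed against the shared definition, one function description works for both chat completions and assistant tools. A sketch under that assumption; the `get_weather` name and its schema are invented for illustration:

```python
from openai import OpenAI
from openai.types.shared_params import FunctionDefinition

client = OpenAI()

# FunctionDefinition is a TypedDict, so a plain dict literal type-checks.
get_weather: FunctionDefinition = {
    "name": "get_weather",
    "description": "Get the current weather for a city.",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}

completion = client.chat.completions.create(
    model="gpt-4-1106-preview",  # assumption: a tool-calling model
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=[{"type": "function", "function": get_weather}],
)
print(completion.choices[0].message.tool_calls)
```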
diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py
index b310761077..51c864588b 100644
--- a/src/openai/types/chat/completion_create_params.py
+++ b/src/openai/types/chat/completion_create_params.py
@@ -5,6 +5,7 @@
 from typing import Dict, List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict
 
+from ...types import shared_params
 from .chat_completion_tool_param import ChatCompletionToolParam
 from .chat_completion_message_param import ChatCompletionMessageParam
 from .chat_completion_tool_choice_option_param import (
@@ -121,7 +122,16 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     response_format: ResponseFormat
     """An object specifying the format that the model must output.
 
-    Used to enable JSON mode.
+    Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
+    message the model generates is valid JSON.
+
+    **Important:** when using JSON mode, you **must** also instruct the model to
+    produce JSON yourself via a system or user message. Without this, the model may
+    generate an unending stream of whitespace until the generation reaches the token
+    limit, resulting in increased latency and the appearance of a "stuck" request.
+    Also note that the message content may be partially cut off if
+    `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
+    or the conversation exceeded the max context length.
     """
 
     seed: Optional[int]
@@ -193,7 +203,7 @@ class Function(TypedDict, total=False):
     of 64.
     """
 
-    parameters: Required[Dict[str, object]]
+    parameters: Required[shared_params.FunctionParameters]
     """The parameters the function accepts, described as a JSON Schema object.
 
     See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling)
@@ -214,19 +224,7 @@ class Function(TypedDict, total=False):
 
 class ResponseFormat(TypedDict, total=False):
     type: Literal["text", "json_object"]
-    """Setting to `json_object` enables JSON mode.
-
-    This guarantees that the message the model generates is valid JSON.
-
-    Note that your system prompt must still instruct the model to produce JSON, and
-    to help ensure you don't forget, the API will throw an error if the string
-    `JSON` does not appear in your system message. Also note that the message
-    content may be partial (i.e. cut off) if `finish_reason="length"`, which
-    indicates the generation exceeded `max_tokens` or the conversation exceeded the
-    max context length.
-
-    Must be one of `text` or `json_object`.
-    """
+    """Must be one of `text` or `json_object`."""
 
 
 class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase):
diff --git a/src/openai/types/completion_choice.py b/src/openai/types/completion_choice.py
index e86d706ed1..71de0f9247 100644
--- a/src/openai/types/completion_choice.py
+++ b/src/openai/types/completion_choice.py
@@ -15,7 +15,7 @@ class Logprobs(BaseModel):
 
     tokens: Optional[List[str]] = None
 
-    top_logprobs: Optional[List[Dict[str, int]]] = None
+    top_logprobs: Optional[List[Dict[str, float]]] = None
 
 
 class CompletionChoice(BaseModel):
diff --git a/src/openai/types/create_embedding_response.py b/src/openai/types/create_embedding_response.py
index 7382bed6b9..bf64037e16 100644
--- a/src/openai/types/create_embedding_response.py
+++ b/src/openai/types/create_embedding_response.py
@@ -24,8 +24,8 @@ class CreateEmbeddingResponse(BaseModel):
     model: str
     """The name of the model used to generate the embedding."""
 
-    object: Literal["embedding"]
-    """The object type, which is always "embedding"."""
+    object: Literal["list"]
+    """The object type, which is always "list"."""
 
     usage: Usage
     """The usage information for the request."""
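A small sketch of what the corrected `object` annotation reflects at runtime; the embedding model name is an assumption:

```python
from openai import OpenAI

client = OpenAI()

response = client.embeddings.create(
    model="text-embedding-ada-002",  # assumption: any embedding model
    input="The food was delicious.",
)

# The top-level object type is "list"; the old Literal["embedding"]
# annotation did not match what the API actually returns.
print(response.object)          # "list"
print(response.data[0].object)  # "embedding" (each item keeps this type)
```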
+ """ + + description: Optional[str] = None + """ + A description of what the function does, used by the model to choose when and + how to call the function. + """ diff --git a/src/openai/types/shared/function_parameters.py b/src/openai/types/shared/function_parameters.py new file mode 100644 index 0000000000..405c2d14cc --- /dev/null +++ b/src/openai/types/shared/function_parameters.py @@ -0,0 +1,7 @@ +# File generated from our OpenAPI spec by Stainless. + +from typing import Dict + +__all__ = ["FunctionParameters"] + +FunctionParameters = Dict[str, object] diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py new file mode 100644 index 0000000000..05bc4ff9ba --- /dev/null +++ b/src/openai/types/shared_params/__init__.py @@ -0,0 +1,4 @@ +# File generated from our OpenAPI spec by Stainless. + +from .function_definition import FunctionDefinition as FunctionDefinition +from .function_parameters import FunctionParameters as FunctionParameters diff --git a/src/openai/types/shared_params/function_definition.py b/src/openai/types/shared_params/function_definition.py new file mode 100644 index 0000000000..6bb6fa6ff2 --- /dev/null +++ b/src/openai/types/shared_params/function_definition.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +from ...types import shared_params + +__all__ = ["FunctionDefinition"] + + +class FunctionDefinition(TypedDict, total=False): + name: Required[str] + """The name of the function to be called. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + parameters: Required[shared_params.FunctionParameters] + """The parameters the functions accepts, described as a JSON Schema object. + + See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) + for examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + To describe a function that accepts no parameters, provide the value + `{"type": "object", "properties": {}}`. + """ + + description: str + """ + A description of what the function does, used by the model to choose when and + how to call the function. + """ diff --git a/src/openai/types/shared_params/function_parameters.py b/src/openai/types/shared_params/function_parameters.py new file mode 100644 index 0000000000..a405f6b2e2 --- /dev/null +++ b/src/openai/types/shared_params/function_parameters.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. + +from __future__ import annotations + +from typing import Dict + +__all__ = ["FunctionParameters"] + +FunctionParameters = Dict[str, object]