diff --git a/.stats.yml b/.stats.yml index d518bac586..e49b5c56e8 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-6204952a29973265b9c0d66fc67ffaf53c6a90ae4d75cdacf9d147676f5274c9.yml diff --git a/api.md b/api.md index e593464bf3..630f003fe8 100644 --- a/api.md +++ b/api.md @@ -5,6 +5,7 @@ from openai.types import ( ErrorObject, FunctionDefinition, FunctionParameters, + Metadata, ResponseFormatJSONObject, ResponseFormatJSONSchema, ResponseFormatText, diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index 6a09825e59..6cc3b9881c 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -77,8 +77,8 @@ def create( Whisper V2 model) is currently available. language: The language of the input audio. Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will - improve accuracy and latency. + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. prompt: An optional text to guide the model's style or continue a previous audio segment. The @@ -189,8 +189,8 @@ async def create( Whisper V2 model) is currently available. language: The language of the input audio. Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will - improve accuracy and latency. + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. prompt: An optional text to guide the model's style or continue a previous audio segment. The diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index a496645a42..7e7ec19ec2 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Dict, Optional +from typing import Optional from typing_extensions import Literal import httpx @@ -20,6 +20,7 @@ from ..pagination import SyncCursorPage, AsyncCursorPage from ..types.batch import Batch from .._base_client import AsyncPaginator, make_request_options +from ..types.shared_params.metadata import Metadata __all__ = ["Batches", "AsyncBatches"] @@ -50,7 +51,7 @@ def create( completion_window: Literal["24h"], endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -80,7 +81,12 @@ def create( and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. - metadata: Optional custom metadata for the batch. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. 
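A minimal sketch of how the retyped `metadata` parameter might be passed on batch creation, assuming the new `Metadata` alias is re-exported from `openai.types` as the `api.md` change above suggests (it is effectively a string-to-string mapping per the docstring); the file ID and tag values are placeholders:

```python
from openai import OpenAI
from openai.types import Metadata  # shared alias for the 16-pair, str -> str metadata map

client = OpenAI()

# Placeholder tags; keys are limited to 64 characters, values to 512.
tags: Metadata = {"team": "billing", "job": "nightly-eval"}

batch = client.batches.create(
    input_file_id="file-abc123",        # hypothetical .jsonl file uploaded with purpose="batch"
    endpoint="/v1/chat/completions",
    completion_window="24h",
    metadata=tags,
)
print(batch.id, batch.metadata)
```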
+ + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers @@ -255,7 +261,7 @@ async def create( completion_window: Literal["24h"], endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -285,7 +291,12 @@ async def create( and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. - metadata: Optional custom metadata for the batch. + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 2f2482b648..65b7c9cfc2 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -26,6 +26,7 @@ from ...types.chat_model import ChatModel from ...types.beta.assistant import Assistant from ...types.beta.assistant_deleted import AssistantDeleted +from ...types.shared_params.metadata import Metadata from ...types.beta.assistant_tool_param import AssistantToolParam from ...types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -58,7 +59,7 @@ def create( model: Union[str, ChatModel], description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -88,9 +89,11 @@ def create( characters. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. name: The name of the assistant. The maximum length is 256 characters. @@ -206,7 +209,7 @@ def update( *, description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: str | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -232,9 +235,11 @@ def update( characters. metadata: Set of 16 key-value pairs that can be attached to an object. 
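A rough sketch of the same typed `metadata` on assistant create and update, under the assumption that `gpt-4o` is an available chat model and the IDs returned are used as-is; tag values are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# metadata is now typed as the shared Metadata alias rather than a bare object.
assistant = client.beta.assistants.create(
    model="gpt-4o",
    name="Support triage bot",
    instructions="Classify incoming tickets by urgency.",
    metadata={"team": "support", "env": "staging"},
)

# The same alias applies on update.
assistant = client.beta.assistants.update(
    assistant.id,
    metadata={"team": "support", "env": "production"},
)
```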
This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to @@ -444,7 +449,7 @@ async def create( model: Union[str, ChatModel], description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -474,9 +479,11 @@ async def create( characters. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. name: The name of the assistant. The maximum length is 256 characters. @@ -592,7 +599,7 @@ async def update( *, description: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: str | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -618,9 +625,11 @@ async def update( characters. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py index b920c89207..4b337b7c19 100644 --- a/src/openai/resources/beta/realtime/sessions.py +++ b/src/openai/resources/beta/realtime/sessions.py @@ -89,8 +89,11 @@ def create( input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. 
+ asynchronously through + [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + and should be treated as rough guidance rather than the representation + understood by the model. The client can optionally set the language and prompt + for transcription, these fields will be passed to the Whisper API. instructions: The default system instructions (i.e. system message) prepended to model calls. This field allows the client to guide the model on desired responses. The model @@ -232,8 +235,11 @@ async def create( input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. + asynchronously through + [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + and should be treated as rough guidance rather than the representation + understood by the model. The client can optionally set the language and prompt + for transcription, these fields will be passed to the Whisper API. instructions: The default system instructions (i.e. system message) prepended to model calls. This field allows the client to guide the model on desired responses. The model diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py index 8be4883189..403f95443f 100644 --- a/src/openai/resources/beta/threads/messages.py +++ b/src/openai/resources/beta/threads/messages.py @@ -20,6 +20,7 @@ from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.threads import message_list_params, message_create_params, message_update_params from ....types.beta.threads.message import Message +from ....types.shared_params.metadata import Metadata from ....types.beta.threads.message_deleted import MessageDeleted from ....types.beta.threads.message_content_part_param import MessageContentPartParam @@ -53,7 +54,7 @@ def create( content: Union[str, Iterable[MessageContentPartParam]], role: Literal["user", "assistant"], attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -78,9 +79,11 @@ def create( attachments: A list of files attached to the message, and the tools they should be added to. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
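A short usage sketch for thread messages, mirroring the `create` and `update` signatures shown in this hunk; the thread ID and tag values are placeholders:

```python
from openai import OpenAI

client = OpenAI()
thread_id = "thread_abc123"  # placeholder thread ID

message = client.beta.threads.messages.create(
    thread_id,
    role="user",
    content="How do I reset my password?",
    metadata={"source": "web_widget"},
)

# update() takes the message ID plus the owning thread ID, as in the signature above.
client.beta.threads.messages.update(
    message.id,
    thread_id=thread_id,
    metadata={"source": "web_widget", "triaged": "true"},
)
```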
extra_headers: Send extra headers @@ -152,7 +155,7 @@ def update( message_id: str, *, thread_id: str, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -165,9 +168,11 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers @@ -327,7 +332,7 @@ async def create( content: Union[str, Iterable[MessageContentPartParam]], role: Literal["user", "assistant"], attachments: Optional[Iterable[message_create_params.Attachment]] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -352,9 +357,11 @@ async def create( attachments: A list of files attached to the message, and the tools they should be added to. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers @@ -426,7 +433,7 @@ async def update( message_id: str, *, thread_id: str, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -439,9 +446,11 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
extra_headers: Send extra headers diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index ca354297c6..a1c93d0382 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -36,6 +36,7 @@ run_submit_tool_outputs_params, ) from .....types.beta.threads.run import Run +from .....types.shared_params.metadata import Metadata from .....types.beta.assistant_tool_param import AssistantToolParam from .....types.beta.assistant_stream_event import AssistantStreamEvent from .....types.beta.threads.runs.run_step_include import RunStepInclude @@ -81,7 +82,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -137,9 +138,11 @@ def create( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -222,7 +225,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -281,9 +284,11 @@ def create( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. 
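A sketch of the run endpoints with the typed `metadata`, assuming the usual required `assistant_id` argument (not visible in this excerpt) and placeholder IDs:

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.runs.create(
    thread_id="thread_abc123",           # placeholder IDs
    assistant_id="asst_abc123",
    metadata={"trace_id": "req-42"},
)

# Runs accept the same typed metadata on update.
run = client.beta.threads.runs.update(
    run.id,
    thread_id="thread_abc123",
    metadata={"trace_id": "req-42", "reviewed": "false"},
)
```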
If a value is provided here, it will override the @@ -362,7 +367,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -421,9 +426,11 @@ def create( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -501,7 +508,7 @@ def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -598,7 +605,7 @@ def update( run_id: str, *, thread_id: str, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -611,9 +618,11 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers @@ -929,7 +938,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -985,9 +994,11 @@ async def create( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. 
This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -1070,7 +1081,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -1129,9 +1140,11 @@ async def create( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -1210,7 +1223,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -1269,9 +1282,11 @@ async def create( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. 
If a value is provided here, it will override the @@ -1349,7 +1364,7 @@ async def create( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -1446,7 +1461,7 @@ async def update( run_id: str, *, thread_id: str, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1459,9 +1474,11 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. extra_headers: Send extra headers diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index bd8205d933..a9c473e28e 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -44,6 +44,7 @@ from ....types.beta.thread import Thread from ....types.beta.threads.run import Run from ....types.beta.thread_deleted import ThreadDeleted +from ....types.shared_params.metadata import Metadata from ....types.beta.assistant_stream_event import AssistantStreamEvent from ....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from ....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -83,7 +84,7 @@ def create( self, *, messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -100,9 +101,11 @@ def create( start the thread with. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. tool_resources: A set of resources that are made available to the assistant's tools in this thread. 
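A sketch of thread creation and update with the typed `metadata`, using placeholder tag values and the dict form of the `messages` parameter:

```python
from openai import OpenAI

client = OpenAI()

thread = client.beta.threads.create(
    messages=[{"role": "user", "content": "Hi, I need help with an invoice."}],
    metadata={"session_id": "sess_123"},   # placeholder tags
)

client.beta.threads.update(
    thread.id,
    metadata={"session_id": "sess_123", "escalated": "true"},
)
```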
The resources are specific to the type of tool. For example, the @@ -172,7 +175,7 @@ def update( self, thread_id: str, *, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -186,9 +189,11 @@ def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the @@ -263,7 +268,7 @@ def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -306,9 +311,11 @@ def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -348,7 +355,8 @@ def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. 
`auto` is the default value @@ -394,7 +402,7 @@ def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -440,9 +448,11 @@ def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -478,7 +488,8 @@ def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value @@ -524,7 +535,7 @@ def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -570,9 +581,11 @@ def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -608,7 +621,8 @@ def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. 
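A sketch of `create_and_run` with the reworded `thread` option, assuming the usual required `assistant_id` and that the nested thread options mirror thread-creation params; all IDs and tags are placeholders:

```python
from openai import OpenAI

client = OpenAI()

run = client.beta.threads.create_and_run(
    assistant_id="asst_abc123",            # placeholder assistant ID
    thread={                               # optional: omit it and an empty thread is created
        "messages": [{"role": "user", "content": "Summarize my open tickets."}],
        "metadata": {"session_id": "sess_123"},
    },
    metadata={"channel": "email"},         # run-level metadata, same 16-pair / 64 / 512 limits
)
```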
`none` means the model will not call any tools and instead generates a message. `auto` is the default value @@ -653,7 +667,7 @@ def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -737,7 +751,7 @@ async def create( self, *, messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, tool_resources: Optional[thread_create_params.ToolResources] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -754,9 +768,11 @@ async def create( start the thread with. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the @@ -826,7 +842,7 @@ async def update( self, thread_id: str, *, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, tool_resources: Optional[thread_update_params.ToolResources] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -840,9 +856,11 @@ async def update( Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. 
For example, the @@ -917,7 +935,7 @@ async def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -960,9 +978,11 @@ async def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -1002,7 +1022,8 @@ async def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value @@ -1048,7 +1069,7 @@ async def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -1094,9 +1115,11 @@ async def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -1132,7 +1155,8 @@ async def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. 
`none` means the model will not call any tools and instead generates a message. `auto` is the default value @@ -1178,7 +1202,7 @@ async def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, @@ -1224,9 +1248,11 @@ async def create_and_run( `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the @@ -1262,7 +1288,8 @@ async def create_and_run( make the output more random, while lower values like 0.2 will make it more focused and deterministic. - thread: If no thread is provided, an empty thread will be created. + thread: Options to create a new thread. If no thread is provided when running a request, + an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. 
`auto` is the default value @@ -1307,7 +1334,7 @@ async def create_and_run( instructions: Optional[str] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_prompt_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, model: Union[str, ChatModel, None] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py index 6b44c602f1..1da52fb3c7 100644 --- a/src/openai/resources/beta/vector_stores/vector_stores.py +++ b/src/openai/resources/beta/vector_stores/vector_stores.py @@ -41,6 +41,7 @@ ) from ...._base_client import AsyncPaginator, make_request_options from ....types.beta.vector_store import VectorStore +from ....types.shared_params.metadata import Metadata from ....types.beta.vector_store_deleted import VectorStoreDeleted from ....types.beta.file_chunking_strategy_param import FileChunkingStrategyParam @@ -81,7 +82,7 @@ def create( chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -104,9 +105,11 @@ def create( files. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. name: The name of the vector store. @@ -176,7 +179,7 @@ def update( vector_store_id: str, *, expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -192,9 +195,11 @@ def update( expires_after: The expiration policy for a vector store. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. name: The name of the vector store. 
@@ -359,7 +364,7 @@ async def create( chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -382,9 +387,11 @@ async def create( files. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. name: The name of the vector store. @@ -454,7 +461,7 @@ async def update( vector_store_id: str, *, expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN, - metadata: Optional[object] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -470,9 +477,11 @@ async def update( expires_after: The expiration policy for a vector store. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format. Keys - can be a maximum of 64 characters long and values can be a maximum of 512 - characters long. + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. name: The name of the vector store. 
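A sketch of vector store create and update with the typed `metadata`, using placeholder names and tag values:

```python
from openai import OpenAI

client = OpenAI()

vector_store = client.beta.vector_stores.create(
    name="Product FAQ",
    metadata={"owner": "docs-team"},       # placeholder tags
)

client.beta.vector_stores.update(
    vector_store.id,
    metadata={"owner": "docs-team", "reviewed": "2024-12"},
)
```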
diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index c44b9d0c30..2b91659950 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -26,6 +26,7 @@ from ..._base_client import make_request_options from ...types.chat_model import ChatModel from ...types.chat.chat_completion import ChatCompletion +from ...types.shared_params.metadata import Metadata from ...types.chat.chat_completion_chunk import ChatCompletionChunk from ...types.chat.chat_completion_modality import ChatCompletionModality from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam @@ -73,7 +74,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -177,8 +178,12 @@ def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default: @@ -244,9 +249,9 @@ def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. @@ -322,7 +327,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -432,8 +437,12 @@ def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. 
Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default: @@ -499,9 +508,9 @@ def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. @@ -570,7 +579,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -680,8 +689,12 @@ def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default: @@ -747,9 +760,9 @@ def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. 
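A sketch of a chat completion request using the generalized `metadata` field described above; the model name and tag values are placeholders:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-4o",                        # any supported chat model
    messages=[{"role": "user", "content": "Say hello."}],
    metadata={"feature": "onboarding", "experiment": "copy_v2"},
)
print(completion.choices[0].message.content)
```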
@@ -817,7 +830,7 @@ def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -924,7 +937,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -1028,8 +1041,12 @@ async def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default: @@ -1095,9 +1112,9 @@ async def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. @@ -1173,7 +1190,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -1283,8 +1300,12 @@ async def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. 
Most models are capable of generating text, which is the default: @@ -1350,9 +1371,9 @@ async def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. @@ -1421,7 +1442,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, @@ -1531,8 +1552,12 @@ async def create( compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - metadata: Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. modalities: Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default: @@ -1598,9 +1623,9 @@ async def create( utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. stop: Up to 4 sequences where the API will stop generating further tokens. 
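The async overloads documented above accept the same `metadata` mapping, alongside the `service_tier` values whose docstring is corrected here. A rough sketch under the same assumptions (API key in the environment, illustrative tag names), not a definitive usage pattern:

    import asyncio

    from openai import AsyncOpenAI


    async def main() -> None:
        client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
        completion = await client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": "Ping"}],
            metadata={"experiment": "metadata-typing"},  # illustrative tag
            service_tier="default",  # default tier: lower uptime SLA, no latency guarantee
        )
        print(completion.choices[0].message.content)


    asyncio.run(main())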
@@ -1668,7 +1693,7 @@ async def create( logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, parallel_tool_calls: bool | NotGiven = NOT_GIVEN, diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 72950f2491..7abb22f239 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -6,6 +6,7 @@ from .image import Image as Image from .model import Model as Model from .shared import ( + Metadata as Metadata, ErrorObject as ErrorObject, FunctionDefinition as FunctionDefinition, FunctionParameters as FunctionParameters, diff --git a/src/openai/types/audio/transcription_create_params.py b/src/openai/types/audio/transcription_create_params.py index 88805affbd..f1779c35e6 100644 --- a/src/openai/types/audio/transcription_create_params.py +++ b/src/openai/types/audio/transcription_create_params.py @@ -30,8 +30,8 @@ class TranscriptionCreateParams(TypedDict, total=False): """The language of the input audio. Supplying the input language in - [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will - improve accuracy and latency. + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. """ prompt: str diff --git a/src/openai/types/batch.py b/src/openai/types/batch.py index ac3d7ea119..35de90ac85 100644 --- a/src/openai/types/batch.py +++ b/src/openai/types/batch.py @@ -1,11 +1,11 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import builtins from typing import List, Optional from typing_extensions import Literal from .._models import BaseModel from .batch_error import BatchError +from .shared.metadata import Metadata from .batch_request_counts import BatchRequestCounts __all__ = ["Batch", "Errors"] @@ -70,12 +70,14 @@ class Batch(BaseModel): in_progress_at: Optional[int] = None """The Unix timestamp (in seconds) for when the batch started processing.""" - metadata: Optional[builtins.object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ output_file_id: Optional[str] = None diff --git a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py index b30c4d4658..e5be1d2bac 100644 --- a/src/openai/types/batch_create_params.py +++ b/src/openai/types/batch_create_params.py @@ -2,9 +2,11 @@ from __future__ import annotations -from typing import Dict, Optional +from typing import Optional from typing_extensions import Literal, Required, TypedDict +from .shared_params.metadata import Metadata + __all__ = ["BatchCreateParams"] @@ -35,5 +37,12 @@ class BatchCreateParams(TypedDict, total=False): requests, and can be up to 200 MB in size. 
""" - metadata: Optional[Dict[str, str]] - """Optional custom metadata for the batch.""" + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py index 3c8b8e403b..58421e0f66 100644 --- a/src/openai/types/beta/assistant.py +++ b/src/openai/types/beta/assistant.py @@ -5,6 +5,7 @@ from ..._models import BaseModel from .assistant_tool import AssistantTool +from ..shared.metadata import Metadata from .assistant_response_format_option import AssistantResponseFormatOption __all__ = ["Assistant", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] @@ -51,12 +52,14 @@ class Assistant(BaseModel): The maximum length is 256,000 characters. """ - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ model: str diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index 568b223ce7..e205856395 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -7,6 +7,7 @@ from ..chat_model import ChatModel from .assistant_tool_param import AssistantToolParam +from ..shared_params.metadata import Metadata from .file_chunking_strategy_param import FileChunkingStrategyParam from .assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -39,12 +40,14 @@ class AssistantCreateParams(TypedDict, total=False): The maximum length is 256,000 characters. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ name: Optional[str] @@ -130,12 +133,14 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): store. """ - metadata: object - """Set of 16 key-value pairs that can be attached to a vector store. + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. - This can be useful for storing additional information about the vector store in - a structured format. Keys can be a maximum of 64 characters long and values can - be a maximum of 512 characters long. + Keys are strings with a maximum length of 64 characters. 
Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index 9a66e41ab3..35065ef61b 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -6,6 +6,7 @@ from typing_extensions import TypedDict from .assistant_tool_param import AssistantToolParam +from ..shared_params.metadata import Metadata from .assistant_response_format_option_param import AssistantResponseFormatOptionParam __all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] @@ -21,12 +22,14 @@ class AssistantUpdateParams(TypedDict, total=False): The maximum length is 256,000 characters. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ model: str diff --git a/src/openai/types/beta/realtime/conversation_item_create_event.py b/src/openai/types/beta/realtime/conversation_item_create_event.py index c4f72b9aff..f19d552a92 100644 --- a/src/openai/types/beta/realtime/conversation_item_create_event.py +++ b/src/openai/types/beta/realtime/conversation_item_create_event.py @@ -20,10 +20,10 @@ class ConversationItemCreateEvent(BaseModel): """Optional client-generated ID used to identify this event.""" previous_item_id: Optional[str] = None - """ - The ID of the preceding item after which the new item will be inserted. If not - set, the new item will be appended to the end of the conversation. If set to - `root`, the new item will be added to the beginning of the conversation. If set - to an existing ID, it allows an item to be inserted mid-conversation. If the ID - cannot be found, an error will be returned and the item will not be added. + """The ID of the preceding item after which the new item will be inserted. + + If not set, the new item will be appended to the end of the conversation. If set + to `root`, the new item will be added to the beginning of the conversation. If + set to an existing ID, it allows an item to be inserted mid-conversation. If the + ID cannot be found, an error will be returned and the item will not be added. """ diff --git a/src/openai/types/beta/realtime/conversation_item_create_event_param.py b/src/openai/types/beta/realtime/conversation_item_create_event_param.py index 6da5a63a9d..693d0fd54d 100644 --- a/src/openai/types/beta/realtime/conversation_item_create_event_param.py +++ b/src/openai/types/beta/realtime/conversation_item_create_event_param.py @@ -20,10 +20,10 @@ class ConversationItemCreateEventParam(TypedDict, total=False): """Optional client-generated ID used to identify this event.""" previous_item_id: str - """ - The ID of the preceding item after which the new item will be inserted. If not - set, the new item will be appended to the end of the conversation. If set to - `root`, the new item will be added to the beginning of the conversation. If set - to an existing ID, it allows an item to be inserted mid-conversation. 
If the ID - cannot be found, an error will be returned and the item will not be added. + """The ID of the preceding item after which the new item will be inserted. + + If not set, the new item will be appended to the end of the conversation. If set + to `root`, the new item will be added to the beginning of the conversation. If + set to an existing ID, it allows an item to be inserted mid-conversation. If the + ID cannot be found, an error will be returned and the item will not be added. """ diff --git a/src/openai/types/beta/realtime/realtime_response.py b/src/openai/types/beta/realtime/realtime_response.py index 3e1b1406c0..4c3c83d666 100644 --- a/src/openai/types/beta/realtime/realtime_response.py +++ b/src/openai/types/beta/realtime/realtime_response.py @@ -1,9 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional +from typing import List, Union, Optional from typing_extensions import Literal from ...._models import BaseModel +from ...shared.metadata import Metadata from .conversation_item import ConversationItem from .realtime_response_usage import RealtimeResponseUsage from .realtime_response_status import RealtimeResponseStatus @@ -15,8 +16,40 @@ class RealtimeResponse(BaseModel): id: Optional[str] = None """The unique ID of the response.""" - metadata: Optional[object] = None - """Developer-provided string key-value pairs associated with this response.""" + conversation_id: Optional[str] = None + """ + Which conversation the response is added to, determined by the `conversation` + field in the `response.create` event. If `auto`, the response will be added to + the default conversation and the value of `conversation_id` will be an id like + `conv_1234`. If `none`, the response will not be added to any conversation and + the value of `conversation_id` will be `null`. If responses are being triggered + by server VAD, the response will be added to the default conversation, thus the + `conversation_id` will be an id like `conv_1234`. + """ + + max_output_tokens: Union[int, Literal["inf"], None] = None + """ + Maximum number of output tokens for a single assistant response, inclusive of + tool calls, that was used in this response. + """ + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + modalities: Optional[List[Literal["text", "audio"]]] = None + """The set of modalities the model used to respond. + + If there are multiple modalities, the model will pick one, for example if + `modalities` is `["text", "audio"]`, the model could be responding in either + text or audio. + """ object: Optional[Literal["realtime.response"]] = None """The object type, must be `realtime.response`.""" @@ -24,6 +57,9 @@ class RealtimeResponse(BaseModel): output: Optional[List[ConversationItem]] = None """The list of output items generated by the response.""" + output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None + """The format of output audio. 
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + status: Optional[Literal["completed", "cancelled", "failed", "incomplete"]] = None """ The final status of the response (`completed`, `cancelled`, `failed`, or @@ -33,6 +69,9 @@ class RealtimeResponse(BaseModel): status_details: Optional[RealtimeResponseStatus] = None """Additional details about the status.""" + temperature: Optional[float] = None + """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" + usage: Optional[RealtimeResponseUsage] = None """Usage statistics for the Response, this will correspond to billing. @@ -40,3 +79,9 @@ class RealtimeResponse(BaseModel): to the Conversation, thus output from previous turns (text and audio tokens) will become the input for later turns. """ + + voice: Optional[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] = None + """ + The voice the model used to respond. Current voice options are `alloy`, `ash`, + `ballad`, `coral`, `echo` `sage`, `shimmer` and `verse`. + """ diff --git a/src/openai/types/beta/realtime/response_create_event.py b/src/openai/types/beta/realtime/response_create_event.py index e4e5e7c68f..0801654bd8 100644 --- a/src/openai/types/beta/realtime/response_create_event.py +++ b/src/openai/types/beta/realtime/response_create_event.py @@ -4,6 +4,7 @@ from typing_extensions import Literal from ...._models import BaseModel +from ...shared.metadata import Metadata from .conversation_item import ConversationItem __all__ = ["ResponseCreateEvent", "Response", "ResponseTool"] @@ -66,12 +67,14 @@ class Response(BaseModel): `inf` for the maximum available tokens for a given model. Defaults to `inf`. """ - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ modalities: Optional[List[Literal["text", "audio"]]] = None diff --git a/src/openai/types/beta/realtime/response_create_event_param.py b/src/openai/types/beta/realtime/response_create_event_param.py index 7a4b5f086a..a87ef955e8 100644 --- a/src/openai/types/beta/realtime/response_create_event_param.py +++ b/src/openai/types/beta/realtime/response_create_event_param.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypedDict from .conversation_item_param import ConversationItemParam +from ...shared_params.metadata import Metadata __all__ = ["ResponseCreateEventParam", "Response", "ResponseTool"] @@ -67,12 +68,14 @@ class Response(TypedDict, total=False): `inf` for the maximum available tokens for a given model. Defaults to `inf`. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
""" modalities: List[Literal["text", "audio"]] diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index 3708efeecd..1502d83d39 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -22,8 +22,11 @@ class SessionCreateParams(TypedDict, total=False): Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. + asynchronously through + [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + and should be treated as rough guidance rather than the representation + understood by the model. The client can optionally set the language and prompt + for transcription, these fields will be passed to the Whisper API. """ instructions: str @@ -101,12 +104,28 @@ class SessionCreateParams(TypedDict, total=False): class InputAudioTranscription(TypedDict, total=False): + language: str + """The language of the input audio. + + Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + """ + model: str """ The model to use for transcription, `whisper-1` is the only currently supported model. """ + prompt: str + """An optional text to guide the model's style or continue a previous audio + segment. + + The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + should match the audio language. + """ + class Tool(TypedDict, total=False): description: str diff --git a/src/openai/types/beta/realtime/session_create_response.py b/src/openai/types/beta/realtime/session_create_response.py index 31f591b261..c26e62bef1 100644 --- a/src/openai/types/beta/realtime/session_create_response.py +++ b/src/openai/types/beta/realtime/session_create_response.py @@ -9,13 +9,13 @@ class ClientSecret(BaseModel): - expires_at: Optional[int] = None + expires_at: int """Timestamp for when the token expires. Currently, all tokens expire after one minute. """ - value: Optional[str] = None + value: str """ Ephemeral key usable in client environments to authenticate connections to the Realtime API. Use this in client-side environments rather than a standard API @@ -74,7 +74,7 @@ class TurnDetection(BaseModel): class SessionCreateResponse(BaseModel): - client_secret: Optional[ClientSecret] = None + client_secret: ClientSecret """Ephemeral key returned by the API.""" input_audio_format: Optional[str] = None diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index 322e588a4e..62fb0a3998 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -9,12 +9,28 @@ class SessionInputAudioTranscription(BaseModel): + language: Optional[str] = None + """The language of the input audio. + + Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + """ + model: Optional[str] = None """ The model to use for transcription, `whisper-1` is the only currently supported model. 
""" + prompt: Optional[str] = None + """An optional text to guide the model's style or continue a previous audio + segment. + + The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + should match the audio language. + """ + class SessionTool(BaseModel): description: Optional[str] = None @@ -78,8 +94,11 @@ class Session(BaseModel): Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. + asynchronously through + [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + and should be treated as rough guidance rather than the representation + understood by the model. The client can optionally set the language and prompt + for transcription, these fields will be passed to the Whisper API. """ instructions: Optional[str] = None diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index c01d9b6887..133cdd91a1 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -15,12 +15,28 @@ class SessionInputAudioTranscription(TypedDict, total=False): + language: str + """The language of the input audio. + + Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + format will improve accuracy and latency. + """ + model: str """ The model to use for transcription, `whisper-1` is the only currently supported model. """ + prompt: str + """An optional text to guide the model's style or continue a previous audio + segment. + + The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + should match the audio language. + """ + class SessionTool(TypedDict, total=False): description: str @@ -84,8 +100,11 @@ class Session(TypedDict, total=False): Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs - asynchronously through Whisper and should be treated as rough guidance rather - than the representation understood by the model. + asynchronously through + [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription) + and should be treated as rough guidance rather than the representation + understood by the model. The client can optionally set the language and prompt + for transcription, these fields will be passed to the Whisper API. 
""" instructions: str diff --git a/src/openai/types/beta/thread.py b/src/openai/types/beta/thread.py index 37d50ccb93..789f66e48b 100644 --- a/src/openai/types/beta/thread.py +++ b/src/openai/types/beta/thread.py @@ -4,6 +4,7 @@ from typing_extensions import Literal from ..._models import BaseModel +from ..shared.metadata import Metadata __all__ = ["Thread", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] @@ -40,12 +41,14 @@ class Thread(BaseModel): created_at: int """The Unix timestamp (in seconds) for when the thread was created.""" - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ object: Literal["thread"] diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 8310ba12f4..08f044c1be 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -8,6 +8,7 @@ from ..chat_model import ChatModel from .function_tool_param import FunctionToolParam from .file_search_tool_param import FileSearchToolParam +from ..shared_params.metadata import Metadata from .code_interpreter_tool_param import CodeInterpreterToolParam from .file_chunking_strategy_param import FileChunkingStrategyParam from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam @@ -67,12 +68,14 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): `incomplete_details` for more info. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ model: Union[str, ChatModel, None] @@ -122,7 +125,11 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): """ thread: Thread - """If no thread is provided, an empty thread will be created.""" + """Options to create a new thread. + + If no thread is provided when running a request, an empty thread will be + created. + """ tool_choice: Optional[AssistantToolChoiceOptionParam] """ @@ -197,12 +204,14 @@ class ThreadMessage(TypedDict, total=False): attachments: Optional[Iterable[ThreadMessageAttachment]] """A list of files attached to the message, and the tools they should be added to.""" - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. 
Values are strings with + a maximum length of 512 characters. """ @@ -230,12 +239,14 @@ class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False): store. """ - metadata: object - """Set of 16 key-value pairs that can be attached to a vector store. + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. - This can be useful for storing additional information about the vector store in - a structured format. Keys can be a maximum of 64 characters long and values can - be a maximum of 512 characters long. + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ @@ -270,12 +281,14 @@ class Thread(TypedDict, total=False): start the thread with. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ tool_resources: Optional[ThreadToolResources] diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py index 3ac6c7d69b..127202753c 100644 --- a/src/openai/types/beta/thread_create_params.py +++ b/src/openai/types/beta/thread_create_params.py @@ -5,6 +5,7 @@ from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..shared_params.metadata import Metadata from .code_interpreter_tool_param import CodeInterpreterToolParam from .file_chunking_strategy_param import FileChunkingStrategyParam from .threads.message_content_part_param import MessageContentPartParam @@ -29,12 +30,14 @@ class ThreadCreateParams(TypedDict, total=False): start the thread with. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ tool_resources: Optional[ToolResources] @@ -78,12 +81,14 @@ class Message(TypedDict, total=False): attachments: Optional[Iterable[MessageAttachment]] """A list of files attached to the message, and the tools they should be added to.""" - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
""" @@ -111,12 +116,14 @@ class ToolResourcesFileSearchVectorStore(TypedDict, total=False): store. """ - metadata: object - """Set of 16 key-value pairs that can be attached to a vector store. + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. - This can be useful for storing additional information about the vector store in - a structured format. Keys can be a maximum of 64 characters long and values can - be a maximum of 512 characters long. + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/beta/thread_update_params.py b/src/openai/types/beta/thread_update_params.py index 78c5ec4f2e..b47ea8f3b0 100644 --- a/src/openai/types/beta/thread_update_params.py +++ b/src/openai/types/beta/thread_update_params.py @@ -5,16 +5,20 @@ from typing import List, Optional from typing_extensions import TypedDict +from ..shared_params.metadata import Metadata + __all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] class ThreadUpdateParams(TypedDict, total=False): - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ tool_resources: Optional[ToolResources] diff --git a/src/openai/types/beta/threads/message.py b/src/openai/types/beta/threads/message.py index 63c5c4800a..4a05a128eb 100644 --- a/src/openai/types/beta/threads/message.py +++ b/src/openai/types/beta/threads/message.py @@ -5,6 +5,7 @@ from ...._models import BaseModel from .message_content import MessageContent +from ...shared.metadata import Metadata from ..code_interpreter_tool import CodeInterpreterTool __all__ = [ @@ -66,12 +67,14 @@ class Message(BaseModel): incomplete_details: Optional[IncompleteDetails] = None """On an incomplete message, details about why the message is incomplete.""" - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
""" object: Literal["thread.message"] diff --git a/src/openai/types/beta/threads/message_create_params.py b/src/openai/types/beta/threads/message_create_params.py index 2c4edfdf71..b52386824a 100644 --- a/src/openai/types/beta/threads/message_create_params.py +++ b/src/openai/types/beta/threads/message_create_params.py @@ -5,6 +5,7 @@ from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ...shared_params.metadata import Metadata from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam @@ -27,12 +28,14 @@ class MessageCreateParams(TypedDict, total=False): attachments: Optional[Iterable[Attachment]] """A list of files attached to the message, and the tools they should be added to.""" - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/beta/threads/message_update_params.py b/src/openai/types/beta/threads/message_update_params.py index e8f8cc910c..bb078281e6 100644 --- a/src/openai/types/beta/threads/message_update_params.py +++ b/src/openai/types/beta/threads/message_update_params.py @@ -5,16 +5,20 @@ from typing import Optional from typing_extensions import Required, TypedDict +from ...shared_params.metadata import Metadata + __all__ = ["MessageUpdateParams"] class MessageUpdateParams(TypedDict, total=False): thread_id: Required[str] - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index ad32135b7d..da9418d6f9 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -6,6 +6,7 @@ from ...._models import BaseModel from .run_status import RunStatus from ..assistant_tool import AssistantTool +from ...shared.metadata import Metadata from ..assistant_tool_choice_option import AssistantToolChoiceOption from ..assistant_response_format_option import AssistantResponseFormatOption from .required_action_function_tool_call import RequiredActionFunctionToolCall @@ -133,12 +134,14 @@ class Run(BaseModel): of the run. """ - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. 
Values are strings with + a maximum length of 512 characters. """ model: str diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 88dc39645e..091dd3da66 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -8,6 +8,7 @@ from ...chat_model import ChatModel from ..assistant_tool_param import AssistantToolParam from .runs.run_step_include import RunStepInclude +from ...shared_params.metadata import Metadata from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam from ..assistant_tool_choice_option_param import AssistantToolChoiceOptionParam @@ -80,12 +81,14 @@ class RunCreateParamsBase(TypedDict, total=False): `incomplete_details` for more info. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ model: Union[str, ChatModel, None] @@ -199,12 +202,14 @@ class AdditionalMessage(TypedDict, total=False): attachments: Optional[Iterable[AdditionalMessageAttachment]] """A list of files attached to the message, and the tools they should be added to.""" - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ diff --git a/src/openai/types/beta/threads/run_update_params.py b/src/openai/types/beta/threads/run_update_params.py index cb4f053645..fbcbd3fb14 100644 --- a/src/openai/types/beta/threads/run_update_params.py +++ b/src/openai/types/beta/threads/run_update_params.py @@ -5,16 +5,20 @@ from typing import Optional from typing_extensions import Required, TypedDict +from ...shared_params.metadata import Metadata + __all__ = ["RunUpdateParams"] class RunUpdateParams(TypedDict, total=False): thread_id: Required[str] - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
""" diff --git a/src/openai/types/beta/threads/runs/run_step.py b/src/openai/types/beta/threads/runs/run_step.py index 0445ae360d..b5f380c7b1 100644 --- a/src/openai/types/beta/threads/runs/run_step.py +++ b/src/openai/types/beta/threads/runs/run_step.py @@ -5,6 +5,7 @@ from ....._utils import PropertyInfo from ....._models import BaseModel +from ....shared.metadata import Metadata from .tool_calls_step_details import ToolCallsStepDetails from .message_creation_step_details import MessageCreationStepDetails @@ -70,12 +71,14 @@ class RunStep(BaseModel): Will be `null` if there are no errors. """ - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ object: Literal["thread.run.step"] diff --git a/src/openai/types/beta/vector_store.py b/src/openai/types/beta/vector_store.py index 2d3ceea80c..b947dfb79d 100644 --- a/src/openai/types/beta/vector_store.py +++ b/src/openai/types/beta/vector_store.py @@ -4,6 +4,7 @@ from typing_extensions import Literal from ..._models import BaseModel +from ..shared.metadata import Metadata __all__ = ["VectorStore", "FileCounts", "ExpiresAfter"] @@ -48,12 +49,14 @@ class VectorStore(BaseModel): last_active_at: Optional[int] = None """The Unix timestamp (in seconds) for when the vector store was last active.""" - metadata: Optional[object] = None + metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ name: str diff --git a/src/openai/types/beta/vector_store_create_params.py b/src/openai/types/beta/vector_store_create_params.py index 4fc7c38927..faca6d9000 100644 --- a/src/openai/types/beta/vector_store_create_params.py +++ b/src/openai/types/beta/vector_store_create_params.py @@ -5,6 +5,7 @@ from typing import List, Optional from typing_extensions import Literal, Required, TypedDict +from ..shared_params.metadata import Metadata from .file_chunking_strategy_param import FileChunkingStrategyParam __all__ = ["VectorStoreCreateParams", "ExpiresAfter"] @@ -28,12 +29,14 @@ class VectorStoreCreateParams(TypedDict, total=False): files. """ - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. 
""" name: str diff --git a/src/openai/types/beta/vector_store_update_params.py b/src/openai/types/beta/vector_store_update_params.py index ff6c068efb..e91b3ba5ad 100644 --- a/src/openai/types/beta/vector_store_update_params.py +++ b/src/openai/types/beta/vector_store_update_params.py @@ -5,6 +5,8 @@ from typing import Optional from typing_extensions import Literal, Required, TypedDict +from ..shared_params.metadata import Metadata + __all__ = ["VectorStoreUpdateParams", "ExpiresAfter"] @@ -12,12 +14,14 @@ class VectorStoreUpdateParams(TypedDict, total=False): expires_after: Optional[ExpiresAfter] """The expiration policy for a vector store.""" - metadata: Optional[object] + metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a - structured format. Keys can be a maximum of 64 characters long and values can be - a maximum of 512 characters long. + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ name: Optional[str] diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 229fb822f4..35e3a3d784 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -38,8 +38,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): """The role of the messages author, in this case `assistant`.""" audio: Optional[Audio] - """ - Data about a previous audio response from the model. + """Data about a previous audio response from the model. + [Learn more](https://platform.openai.com/docs/guides/audio). """ diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 30d930b120..ec88ea1fb0 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -6,6 +6,7 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..chat_model import ChatModel +from ..shared_params.metadata import Metadata from .chat_completion_modality import ChatCompletionModality from .chat_completion_tool_param import ChatCompletionToolParam from .chat_completion_audio_param import ChatCompletionAudioParam @@ -122,10 +123,14 @@ class CompletionCreateParamsBase(TypedDict, total=False): [o1 series models](https://platform.openai.com/docs/guides/reasoning). """ - metadata: Optional[Dict[str, str]] - """ - Developer-defined tags and values used for filtering completions in the - [dashboard](https://platform.openai.com/chat-completions). + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. """ modalities: Optional[List[ChatCompletionModality]] @@ -216,9 +221,9 @@ class CompletionCreateParamsBase(TypedDict, total=False): utilize scale tier credits until they are exhausted. 
- If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no - latency guarentee. + latency guarantee. - If set to 'default', the request will be processed using the default service - tier with a lower uptime SLA and no latency guarentee. + tier with a lower uptime SLA and no latency guarantee. - When not set, the default behavior is 'auto'. """ diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py index e1ac464320..c191cb9734 100644 --- a/src/openai/types/chat_model.py +++ b/src/openai/types/chat_model.py @@ -5,6 +5,8 @@ __all__ = ["ChatModel"] ChatModel: TypeAlias = Literal[ + "o3-mini", + "o3-mini-2025-01-31", "o1", "o1-2024-12-17", "o1-preview", diff --git a/src/openai/types/shared/__init__.py b/src/openai/types/shared/__init__.py index c8776bca0e..74bf304904 100644 --- a/src/openai/types/shared/__init__.py +++ b/src/openai/types/shared/__init__.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .metadata import Metadata as Metadata from .error_object import ErrorObject as ErrorObject from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters diff --git a/src/openai/types/shared/metadata.py b/src/openai/types/shared/metadata.py new file mode 100644 index 0000000000..0da88c679c --- /dev/null +++ b/src/openai/types/shared/metadata.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict +from typing_extensions import TypeAlias + +__all__ = ["Metadata"] + +Metadata: TypeAlias = Dict[str, str] diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py index ab4057d59f..68a8db75fe 100644 --- a/src/openai/types/shared_params/__init__.py +++ b/src/openai/types/shared_params/__init__.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .metadata import Metadata as Metadata from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters from .response_format_text import ResponseFormatText as ResponseFormatText diff --git a/src/openai/types/shared_params/metadata.py b/src/openai/types/shared_params/metadata.py new file mode 100644 index 0000000000..821650b48b --- /dev/null +++ b/src/openai/types/shared_params/metadata.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Dict +from typing_extensions import TypeAlias + +__all__ = ["Metadata"] + +Metadata: TypeAlias = Dict[str, str] diff --git a/src/openai/types/upload.py b/src/openai/types/upload.py index 1cf8ee97f8..d8108c62f9 100644 --- a/src/openai/types/upload.py +++ b/src/openai/types/upload.py @@ -39,4 +39,4 @@ class Upload(BaseModel): """The status of the Upload.""" file: Optional[FileObject] = None - """The ready File object after the Upload is completed.""" + """The `File` object represents a document that has been uploaded to OpenAI.""" diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py index 908aa983be..5a17088ce6 100644 --- a/tests/api_resources/beta/realtime/test_sessions.py +++ b/tests/api_resources/beta/realtime/test_sessions.py @@ -26,7 +26,11 @@ def test_method_create(self, client: OpenAI) -> None: def test_method_create_with_all_params(self, client: OpenAI) -> None: session = client.beta.realtime.sessions.create( input_audio_format="pcm16", - input_audio_transcription={"model": "model"}, + input_audio_transcription={ + "language": "language", + "model": "model", + "prompt": "prompt", + }, instructions="instructions", max_response_output_tokens=0, modalities=["text"], @@ -86,7 +90,11 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: session = await async_client.beta.realtime.sessions.create( input_audio_format="pcm16", - input_audio_transcription={"model": "model"}, + input_audio_transcription={ + "language": "language", + "model": "model", + "prompt": "prompt", + }, instructions="instructions", max_response_output_tokens=0, modalities=["text"], diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index d9944448b7..458e3f5e90 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -34,7 +34,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: model="gpt-4o", description="description", instructions="instructions", - metadata={}, + metadata={"foo": "string"}, name="name", response_format="auto", temperature=1, @@ -46,7 +46,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -131,7 +131,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: assistant_id="assistant_id", description="description", instructions="instructions", - metadata={}, + metadata={"foo": "string"}, model="model", name="name", response_format="auto", @@ -266,7 +266,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> model="gpt-4o", description="description", instructions="instructions", - metadata={}, + metadata={"foo": "string"}, name="name", response_format="auto", temperature=1, @@ -278,7 +278,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -363,7 +363,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> assistant_id="assistant_id", description="description", instructions="instructions", - metadata={}, + metadata={"foo": "string"}, model="model", name="name", 
response_format="auto", diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py index 72c5cc0f19..ea89213e95 100644 --- a/tests/api_resources/beta/test_threads.py +++ b/tests/api_resources/beta/test_threads.py @@ -39,10 +39,10 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - metadata={}, + metadata={"foo": "string"}, tool_resources={ "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -51,7 +51,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -128,7 +128,7 @@ def test_method_update(self, client: OpenAI) -> None: def test_method_update_with_all_params(self, client: OpenAI) -> None: thread = client.beta.threads.update( thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, tool_resources={ "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, @@ -219,7 +219,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -236,10 +236,10 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - "metadata": {}, + "metadata": {"foo": "string"}, "tool_resources": { "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -248,7 +248,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI) { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -308,7 +308,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -324,10 +324,10 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - "metadata": {}, + "metadata": {"foo": "string"}, "tool_resources": { "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -336,7 +336,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI) { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -403,10 +403,10 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - metadata={}, + metadata={"foo": "string"}, tool_resources={ "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -415,7 +415,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -492,7 +492,7 @@ async def test_method_update(self, async_client: AsyncOpenAI) -> None: async def test_method_update_with_all_params(self, 
async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.update( thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, tool_resources={ "code_interpreter": {"file_ids": ["string"]}, "file_search": {"vector_store_ids": ["string"]}, @@ -583,7 +583,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -600,10 +600,10 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - "metadata": {}, + "metadata": {"foo": "string"}, "tool_resources": { "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -612,7 +612,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, @@ -672,7 +672,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -688,10 +688,10 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], - "metadata": {}, + "metadata": {"foo": "string"}, "tool_resources": { "code_interpreter": {"file_ids": ["string"]}, "file_search": { @@ -700,7 +700,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie { "chunking_strategy": {"type": "auto"}, "file_ids": ["string"], - "metadata": {}, + "metadata": {"foo": "string"}, } ], }, diff --git a/tests/api_resources/beta/test_vector_stores.py b/tests/api_resources/beta/test_vector_stores.py index 162241a13d..3216df907b 100644 --- a/tests/api_resources/beta/test_vector_stores.py +++ b/tests/api_resources/beta/test_vector_stores.py @@ -35,7 +35,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: "days": 1, }, file_ids=["string"], - metadata={}, + metadata={"foo": "string"}, name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @@ -113,7 +113,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: "anchor": "last_active_at", "days": 1, }, - metadata={}, + metadata={"foo": "string"}, name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @@ -240,7 +240,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "days": 1, }, file_ids=["string"], - metadata={}, + metadata={"foo": "string"}, name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) @@ -318,7 +318,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> "anchor": "last_active_at", "days": 1, }, - metadata={}, + metadata={"foo": "string"}, name="name", ) assert_matches_type(VectorStore, vector_store, path=["response"]) diff --git a/tests/api_resources/beta/threads/test_messages.py b/tests/api_resources/beta/threads/test_messages.py index 1d50c73e92..c965f0ab90 100644 --- a/tests/api_resources/beta/threads/test_messages.py +++ b/tests/api_resources/beta/threads/test_messages.py @@ -42,7 +42,7 @@ def 
test_method_create_with_all_params(self, client: OpenAI) -> None: "tools": [{"type": "code_interpreter"}], } ], - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Message, message, path=["response"]) @@ -144,7 +144,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: message = client.beta.threads.messages.update( message_id="message_id", thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Message, message, path=["response"]) @@ -311,7 +311,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> "tools": [{"type": "code_interpreter"}], } ], - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Message, message, path=["response"]) @@ -413,7 +413,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> message = await async_client.beta.threads.messages.update( message_id="message_id", thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Message, message, path=["response"]) diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index ecce003a85..1509d5e9ab 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -45,13 +45,13 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -128,13 +128,13 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -246,7 +246,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: run = client.beta.threads.runs.update( run_id="run_id", thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Run, run, path=["response"]) @@ -541,13 +541,13 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -624,13 +624,13 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "tools": [{"type": "code_interpreter"}], } ], - "metadata": {}, + "metadata": {"foo": "string"}, } ], instructions="instructions", max_completion_tokens=256, max_prompt_tokens=256, - metadata={}, + metadata={"foo": "string"}, model="gpt-4o", parallel_tool_calls=True, response_format="auto", @@ -742,7 +742,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> run = await async_client.beta.threads.runs.update( run_id="run_id", thread_id="thread_id", - metadata={}, + metadata={"foo": "string"}, ) assert_matches_type(Run, run, path=["response"])
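`o3-mini` and its dated snapshot `o3-mini-2025-01-31` are new entries in the `ChatModel` literal in `src/openai/types/chat_model.py`. A small usage sketch with the standard chat completions call, assuming `OPENAI_API_KEY` is set in the environment; the prompt text is illustrative only:

from openai import OpenAI

client = OpenAI()

# Pass one of the newly added literals; "o3-mini-2025-01-31" pins the dated snapshot.
completion = client.chat.completions.create(
    model="o3-mini",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(completion.choices[0].message.content)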
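A minimal sketch, not part of the generated diff, of the new shared `Metadata` alias from a caller's point of view. The alias added in `src/openai/types/shared_params/metadata.py` is just `Dict[str, str]`, and the key/value pairs below are illustrative, echoing the `metadata={"foo": "string"}` values the updated tests pass:

from openai import OpenAI
from openai.types.shared_params import Metadata  # re-exported via shared_params/__init__.py

client = OpenAI()

# A plain Dict[str, str]; the specific keys and values here are made up for illustration.
labels: Metadata = {"project": "demo", "owner": "data-team"}

assistant = client.beta.assistants.create(
    model="gpt-4o",
    metadata=labels,
)

# The same shape is what the updated test methods send to the update endpoints.
client.beta.assistants.update(assistant_id=assistant.id, metadata={"foo": "string"})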
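The realtime session tests now send `language` and `prompt` alongside `model` inside `input_audio_transcription`. A sketch of the same call with more concrete values; the `whisper-1` model name and the other field contents are assumptions for illustration, not taken from this diff:

from openai import OpenAI

client = OpenAI()

session = client.beta.realtime.sessions.create(
    input_audio_format="pcm16",
    modalities=["text"],
    input_audio_transcription={
        "model": "whisper-1",  # assumed transcription model; the test only passes the placeholder "model"
        "language": "en",      # newly exercised field: an ISO-639-1 language hint
        "prompt": "Names to expect: Stainless, OpenAPI.",  # newly exercised field: optional guidance text
    },
)
print(session)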