diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml
new file mode 100644
index 0000000000..b3e1c679d4
--- /dev/null
+++ b/.github/workflows/create-releases.yml
@@ -0,0 +1,39 @@
+name: Create releases
+on:
+  schedule:
+    - cron: '0 5 * * *' # every day at 5am UTC
+  push:
+    branches:
+      - main
+
+jobs:
+  release:
+    name: release
+    if: github.ref == 'refs/heads/main' && github.repository == 'openai/openai-python'
+    runs-on: ubuntu-latest
+    environment: publish
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: stainless-api/trigger-release-please@v1
+        id: release
+        with:
+          repo: ${{ github.event.repository.full_name }}
+          stainless-api-key: ${{ secrets.STAINLESS_API_KEY }}
+
+      - name: Install Rye
+        if: ${{ steps.release.outputs.releases_created }}
+        run: |
+          curl -sSf https://rye.astral.sh/get | bash
+          echo "$HOME/.rye/shims" >> $GITHUB_PATH
+        env:
+          RYE_VERSION: '0.44.0'
+          RYE_INSTALL_OPTION: '--yes'
+
+      - name: Publish to PyPI
+        if: ${{ steps.release.outputs.releases_created }}
+        run: |
+          bash ./bin/publish-pypi
+        env:
+          PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index dac37ce406..e567f9cb13 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.66.4"
+  ".": "1.66.5"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 53c73037d5..b032562238 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 81
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c8579861bc21d4d2155a5b9e8e7d54faee8083730673c4d32cbbe573d7fb4116.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f3bce04386c4fcfd5037e0477fbaa39010003fd1558eb5185fe4a71dd6a05fdd.yml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1ed70082c7..d8fb019fc8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
 # Changelog
 
+## 1.66.5 (2025-03-18)
+
+Full Changelog: [v1.66.4...v1.66.5](https://github.com/openai/openai-python/compare/v1.66.4...v1.66.5)
+
+### Bug Fixes
+
+* **types:** improve responses type names ([#2224](https://github.com/openai/openai-python/issues/2224)) ([5f7beb8](https://github.com/openai/openai-python/commit/5f7beb873af5ccef2551f34ab3ef098e099ce9c6))
+
+
+### Chores
+
+* **internal:** add back releases workflow ([c71d4c9](https://github.com/openai/openai-python/commit/c71d4c918eab3532b36ea944b0c4069db6ac2d38))
+* **internal:** codegen related update ([#2222](https://github.com/openai/openai-python/issues/2222)) ([f570d91](https://github.com/openai/openai-python/commit/f570d914a16cb5092533e32dfd863027d378c0b5))
+
 ## 1.66.4 (2025-03-17)
 
 Full Changelog: [v1.66.3...v1.66.4](https://github.com/openai/openai-python/compare/v1.66.3...v1.66.4)
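The type-name fix called out above changes how the input-item union is imported. A minimal before-and-after sketch:

```python
# Before 1.66.5 the union was only reachable through its module path:
# from openai.types.responses.response_item_list import Data

# From 1.66.5 on it is a first-class export with a proper name:
from openai.types.responses import ResponseItem
```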
diff --git a/api.md b/api.md
index e760fe69c1..6e7f48a645 100644
--- a/api.md
+++ b/api.md
@@ -605,6 +605,8 @@ from openai.types.responses import (
     ResponseCodeInterpreterToolCall,
     ResponseCompletedEvent,
     ResponseComputerToolCall,
+    ResponseComputerToolCallOutputItem,
+    ResponseComputerToolCallOutputScreenshot,
     ResponseContent,
     ResponseContentPartAddedEvent,
     ResponseContentPartDoneEvent,
@@ -621,6 +623,8 @@ from openai.types.responses import (
     ResponseFunctionCallArgumentsDeltaEvent,
     ResponseFunctionCallArgumentsDoneEvent,
     ResponseFunctionToolCall,
+    ResponseFunctionToolCallItem,
+    ResponseFunctionToolCallOutputItem,
     ResponseFunctionWebSearch,
     ResponseInProgressEvent,
     ResponseIncludable,
@@ -632,7 +636,9 @@ from openai.types.responses import (
     ResponseInputImage,
     ResponseInputItem,
     ResponseInputMessageContentList,
+    ResponseInputMessageItem,
     ResponseInputText,
+    ResponseItem,
     ResponseOutputAudio,
     ResponseOutputItem,
     ResponseOutputItemAddedEvent,
@@ -677,4 +683,4 @@ from openai.types.responses import ResponseItemList
 
 Methods:
 
-- client.responses.input_items.list(response_id, \*\*params) -> SyncCursorPage[Data]
+- client.responses.input_items.list(response_id, \*\*params) -> SyncCursorPage[ResponseItem]
diff --git a/pyproject.toml b/pyproject.toml
index 8247861185..5fdf2a836d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.66.4"
+version = "1.66.5"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/openai/_version.py b/src/openai/_version.py
index df2f60a7dc..dbefc6ec32 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.66.4"  # x-release-please-version
+__version__ = "1.66.5"  # x-release-please-version
diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py
index 7e7ec19ec2..b7a299be12 100644
--- a/src/openai/resources/batches.py
+++ b/src/openai/resources/batches.py
@@ -49,7 +49,7 @@ def create(
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
         input_file_id: str,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -67,9 +67,9 @@ def create(
               is supported.
 
           endpoint: The endpoint to be used for all requests in the batch. Currently
-              `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
-              Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
-              embedding inputs across all requests in the batch.
+              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+              are supported. Note that `/v1/embeddings` batches are also restricted to a
+              maximum of 50,000 embedding inputs across all requests in the batch.
 
           input_file_id: The ID of an uploaded file that contains requests for the new batch.
 
@@ -259,7 +259,7 @@ async def create(
         self,
         *,
         completion_window: Literal["24h"],
-        endpoint: Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
+        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
         input_file_id: str,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -277,9 +277,9 @@ async def create(
               is supported.
 
           endpoint: The endpoint to be used for all requests in the batch. Currently
-              `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
-              Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
-              embedding inputs across all requests in the batch.
+              `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+              are supported. Note that `/v1/embeddings` batches are also restricted to a
+              maximum of 50,000 embedding inputs across all requests in the batch.
 
           input_file_id: The ID of an uploaded file that contains requests for the new batch.
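The new `/v1/responses` value is accepted anywhere the other batch endpoints are. A minimal sketch of creating such a batch; the file ID is a placeholder for an uploaded JSONL file whose lines are `/v1/responses` request bodies:

```python
from openai import OpenAI

client = OpenAI()

batch = client.batches.create(
    completion_window="24h",      # currently the only supported window
    endpoint="/v1/responses",     # newly supported in this release
    input_file_id="file-abc123",  # placeholder: a previously uploaded JSONL file
)
print(batch.id, batch.status)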
diff --git a/src/openai/resources/responses/input_items.py b/src/openai/resources/responses/input_items.py
index 10e7d545dc..e341393cd1 100644
--- a/src/openai/resources/responses/input_items.py
+++ b/src/openai/resources/responses/input_items.py
@@ -16,7 +16,7 @@
 from ...pagination import SyncCursorPage, AsyncCursorPage
 from ..._base_client import AsyncPaginator, make_request_options
 from ...types.responses import input_item_list_params
-from ...types.responses.response_item_list import Data
+from ...types.responses.response_item import ResponseItem
 
 __all__ = ["InputItems", "AsyncInputItems"]
 
@@ -55,7 +55,7 @@ def list(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> SyncCursorPage[Data]:
+    ) -> SyncCursorPage[ResponseItem]:
         """
         Returns a list of input items for a given response.
 
@@ -84,7 +84,7 @@ def list(
             raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
         return self._get_api_list(
             f"/responses/{response_id}/input_items",
-            page=SyncCursorPage[Data],
+            page=SyncCursorPage[ResponseItem],
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -100,7 +100,7 @@ def list(
                     input_item_list_params.InputItemListParams,
                 ),
             ),
-            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
+            model=cast(Any, ResponseItem),  # Union types cannot be passed in as arguments in the type system
         )
 
 
@@ -138,7 +138,7 @@ def list(
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AsyncPaginator[Data, AsyncCursorPage[Data]]:
+    ) -> AsyncPaginator[ResponseItem, AsyncCursorPage[ResponseItem]]:
         """
         Returns a list of input items for a given response.
 
@@ -167,7 +167,7 @@ def list(
             raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
         return self._get_api_list(
             f"/responses/{response_id}/input_items",
-            page=AsyncCursorPage[Data],
+            page=AsyncCursorPage[ResponseItem],
             options=make_request_options(
                 extra_headers=extra_headers,
                 extra_query=extra_query,
@@ -183,7 +183,7 @@ def list(
                     input_item_list_params.InputItemListParams,
                 ),
             ),
-            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
+            model=cast(Any, ResponseItem),  # Union types cannot be passed in as arguments in the type system
         )
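Pagination now yields `ResponseItem` values. A sketch, with a placeholder response ID:

```python
from openai import OpenAI

client = OpenAI()

# Auto-paginates through every input item of the response.
for item in client.responses.input_items.list(response_id="resp_123"):
    # `item` is a ResponseItem: a union discriminated on its `type` field.
    print(item.type, item.id)
```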
diff --git a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py
index e5be1d2bac..cc95afd3ba 100644
--- a/src/openai/types/batch_create_params.py
+++ b/src/openai/types/batch_create_params.py
@@ -17,12 +17,13 @@ class BatchCreateParams(TypedDict, total=False):
     Currently only `24h` is supported.
     """
 
-    endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
+    endpoint: Required[Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
     """The endpoint to be used for all requests in the batch.
 
-    Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are
-    supported. Note that `/v1/embeddings` batches are also restricted to a maximum
-    of 50,000 embedding inputs across all requests in the batch.
+    Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and
+    `/v1/completions` are supported. Note that `/v1/embeddings` batches are also
+    restricted to a maximum of 50,000 embedding inputs across all requests in the
+    batch.
     """
 
     input_file_id: Required[str]
diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py
index dede513f1e..31b9cb5456 100644
--- a/src/openai/types/chat/chat_completion_chunk.py
+++ b/src/openai/types/chat/chat_completion_chunk.py
@@ -142,6 +142,9 @@ class ChatCompletionChunk(BaseModel):
     """
     An optional field that will only be present when you set
     `stream_options: {"include_usage": true}` in your request. When present, it
-    contains a null value except for the last chunk which contains the token usage
-    statistics for the entire request.
+    contains a null value **except for the last chunk** which contains the token
+    usage statistics for the entire request.
+
+    **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+    final usage chunk which contains the total token usage for the request.
     """
diff --git a/src/openai/types/chat/chat_completion_content_part_param.py b/src/openai/types/chat/chat_completion_content_part_param.py
index 1293c54312..cbedc853ba 100644
--- a/src/openai/types/chat/chat_completion_content_part_param.py
+++ b/src/openai/types/chat/chat_completion_content_part_param.py
@@ -22,7 +22,7 @@ class FileFile(TypedDict, total=False):
     file_id: str
     """The ID of an uploaded file to use as input."""
 
-    file_name: str
+    filename: str
     """The name of the file, used when passing the file to the model as a string."""
 
 
diff --git a/src/openai/types/chat/chat_completion_stream_options_param.py b/src/openai/types/chat/chat_completion_stream_options_param.py
index fbf7291821..471e0eba98 100644
--- a/src/openai/types/chat/chat_completion_stream_options_param.py
+++ b/src/openai/types/chat/chat_completion_stream_options_param.py
@@ -12,6 +12,9 @@ class ChatCompletionStreamOptionsParam(TypedDict, total=False):
    """If set, an additional chunk will be streamed before the `data: [DONE]`
    message. The `usage` field on this chunk shows the token usage statistics for the entire
-    request, and the `choices` field will always be an empty array. All other chunks
-    will also include a `usage` field, but with a null value.
+    request, and the `choices` field will always be an empty array.
+
+    All other chunks will also include a `usage` field, but with a null value.
+    **NOTE:** If the stream is interrupted, you may not receive the final usage
+    chunk which contains the total token usage for the request.
     """
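Both notes warn about the same pitfall: abandon the stream early and the final usage chunk never arrives. A defensive sketch (the model name is illustrative):

```python
from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-4o-mini",  # illustrative model name
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
    stream_options={"include_usage": True},
)

usage = None
for chunk in stream:
    if chunk.choices:
        print(chunk.choices[0].delta.content or "", end="")
    # Every chunk carries `usage`; it is None on all but the final chunk.
    if chunk.usage is not None:
        usage = chunk.usage

# `usage` stays None if the stream was interrupted before the last chunk.
if usage is not None:
    print(f"\ntotal tokens: {usage.total_tokens}")
```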
""" diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index 7c0cf9e3f2..4f07a3d097 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -7,6 +7,7 @@ from .tool_param import ToolParam as ToolParam from .computer_tool import ComputerTool as ComputerTool from .function_tool import FunctionTool as FunctionTool +from .response_item import ResponseItem as ResponseItem from .response_error import ResponseError as ResponseError from .response_usage import ResponseUsage as ResponseUsage from .parsed_response import ( @@ -66,6 +67,7 @@ from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig from .response_function_tool_call import ResponseFunctionToolCall as ResponseFunctionToolCall +from .response_input_message_item import ResponseInputMessageItem as ResponseInputMessageItem from .response_refusal_done_event import ResponseRefusalDoneEvent as ResponseRefusalDoneEvent from .response_function_web_search import ResponseFunctionWebSearch as ResponseFunctionWebSearch from .response_input_content_param import ResponseInputContentParam as ResponseInputContentParam @@ -76,6 +78,7 @@ from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent +from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent from .response_computer_tool_call_param import ResponseComputerToolCallParam as ResponseComputerToolCallParam from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent @@ -90,9 +93,15 @@ from .response_audio_transcript_delta_event import ( ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, ) +from .response_computer_tool_call_output_item import ( + ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem, +) from .response_format_text_json_schema_config import ( ResponseFormatTextJSONSchemaConfig as ResponseFormatTextJSONSchemaConfig, ) +from .response_function_tool_call_output_item import ( + ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem, +) from .response_web_search_call_completed_event import ( ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent, ) @@ -120,6 +129,9 @@ from .response_function_call_arguments_delta_event import ( ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, ) +from .response_computer_tool_call_output_screenshot import ( + ResponseComputerToolCallOutputScreenshot as ResponseComputerToolCallOutputScreenshot, +) from .response_format_text_json_schema_config_param import ( ResponseFormatTextJSONSchemaConfigParam as ResponseFormatTextJSONSchemaConfigParam, ) @@ -138,3 +150,6 @@ from .response_code_interpreter_call_interpreting_event import ( ResponseCodeInterpreterCallInterpretingEvent as ResponseCodeInterpreterCallInterpretingEvent, ) +from .response_computer_tool_call_output_screenshot_param import ( + ResponseComputerToolCallOutputScreenshotParam as ResponseComputerToolCallOutputScreenshotParam, +) diff 
diff --git a/src/openai/types/responses/response_computer_tool_call_output_item.py b/src/openai/types/responses/response_computer_tool_call_output_item.py
new file mode 100644
index 0000000000..a2dd68f579
--- /dev/null
+++ b/src/openai/types/responses/response_computer_tool_call_output_item.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot
+
+__all__ = ["ResponseComputerToolCallOutputItem", "AcknowledgedSafetyCheck"]
+
+
+class AcknowledgedSafetyCheck(BaseModel):
+    id: str
+    """The ID of the pending safety check."""
+
+    code: str
+    """The type of the pending safety check."""
+
+    message: str
+    """Details about the pending safety check."""
+
+
+class ResponseComputerToolCallOutputItem(BaseModel):
+    id: str
+    """The unique ID of the computer call tool output."""
+
+    call_id: str
+    """The ID of the computer tool call that produced the output."""
+
+    output: ResponseComputerToolCallOutputScreenshot
+    """A computer screenshot image used with the computer use tool."""
+
+    type: Literal["computer_call_output"]
+    """The type of the computer tool call output. Always `computer_call_output`."""
+
+    acknowledged_safety_checks: Optional[List[AcknowledgedSafetyCheck]] = None
+    """
+    The safety checks reported by the API that have been acknowledged by the
+    developer.
+    """
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the message input.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when input items
+    are returned via API.
+    """
diff --git a/src/openai/types/responses/response_computer_tool_call_output_screenshot.py b/src/openai/types/responses/response_computer_tool_call_output_screenshot.py
new file mode 100644
index 0000000000..a500da85c1
--- /dev/null
+++ b/src/openai/types/responses/response_computer_tool_call_output_screenshot.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseComputerToolCallOutputScreenshot"]
+
+
+class ResponseComputerToolCallOutputScreenshot(BaseModel):
+    type: Literal["computer_screenshot"]
+    """Specifies the event type.
+
+    For a computer screenshot, this property is always set to `computer_screenshot`.
+    """
+
+    file_id: Optional[str] = None
+    """The identifier of an uploaded file that contains the screenshot."""
+
+    image_url: Optional[str] = None
+    """The URL of the screenshot image."""
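Given one of these output items, the screenshot itself lives on `item.output`, as either a file ID or a URL. A small sketch:

```python
from typing import Optional

from openai.types.responses import ResponseComputerToolCallOutputItem

def screenshot_location(item: ResponseComputerToolCallOutputItem) -> Optional[str]:
    """Return wherever the screenshot can be fetched from, if anywhere."""
    # A computer_screenshot output carries a URL, a file ID, or neither.
    return item.output.image_url or item.output.file_id
```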
diff --git a/src/openai/types/responses/response_computer_tool_call_output_screenshot_param.py b/src/openai/types/responses/response_computer_tool_call_output_screenshot_param.py
new file mode 100644
index 0000000000..efc2028aa4
--- /dev/null
+++ b/src/openai/types/responses/response_computer_tool_call_output_screenshot_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ResponseComputerToolCallOutputScreenshotParam"]
+
+
+class ResponseComputerToolCallOutputScreenshotParam(TypedDict, total=False):
+    type: Required[Literal["computer_screenshot"]]
+    """Specifies the event type.
+
+    For a computer screenshot, this property is always set to `computer_screenshot`.
+    """
+
+    file_id: str
+    """The identifier of an uploaded file that contains the screenshot."""
+
+    image_url: str
+    """The URL of the screenshot image."""
diff --git a/src/openai/types/responses/response_function_tool_call.py b/src/openai/types/responses/response_function_tool_call.py
index 5d82906cb7..2a8482204e 100644
--- a/src/openai/types/responses/response_function_tool_call.py
+++ b/src/openai/types/responses/response_function_tool_call.py
@@ -9,9 +9,6 @@
 
 
 class ResponseFunctionToolCall(BaseModel):
-    id: str
-    """The unique ID of the function tool call."""
-
     arguments: str
     """A JSON string of the arguments to pass to the function."""
 
@@ -24,6 +21,9 @@ class ResponseFunctionToolCall(BaseModel):
     type: Literal["function_call"]
     """The type of the function tool call. Always `function_call`."""
 
+    id: Optional[str] = None
+    """The unique ID of the function tool call."""
+
     status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
     """The status of the item.
 
diff --git a/src/openai/types/responses/response_function_tool_call_item.py b/src/openai/types/responses/response_function_tool_call_item.py
new file mode 100644
index 0000000000..477e9b70aa
--- /dev/null
+++ b/src/openai/types/responses/response_function_tool_call_item.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from .response_function_tool_call import ResponseFunctionToolCall
+
+__all__ = ["ResponseFunctionToolCallItem"]
+
+
+class ResponseFunctionToolCallItem(ResponseFunctionToolCall):
+    id: str  # type: ignore
+    """The unique ID of the function tool call."""
diff --git a/src/openai/types/responses/response_function_tool_call_output_item.py b/src/openai/types/responses/response_function_tool_call_output_item.py
new file mode 100644
index 0000000000..4c8c41a6fe
--- /dev/null
+++ b/src/openai/types/responses/response_function_tool_call_output_item.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseFunctionToolCallOutputItem"]
+
+
+class ResponseFunctionToolCallOutputItem(BaseModel):
+    id: str
+    """The unique ID of the function call tool output."""
+
+    call_id: str
+    """The unique ID of the function tool call generated by the model."""
+
+    output: str
+    """A JSON string of the output of the function tool call."""
+
+    type: Literal["function_call_output"]
+    """The type of the function tool call output. Always `function_call_output`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
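A listed function call (`ResponseFunctionToolCallItem`, with `id` guaranteed) and its result (`ResponseFunctionToolCallOutputItem`) share a `call_id`. A sketch that pairs them up, with a placeholder response ID:

```python
from openai import OpenAI
from openai.types.responses import (
    ResponseFunctionToolCallItem,
    ResponseFunctionToolCallOutputItem,
)

client = OpenAI()

calls, outputs = {}, {}
for item in client.responses.input_items.list(response_id="resp_123"):  # placeholder ID
    if isinstance(item, ResponseFunctionToolCallItem):
        calls[item.call_id] = item
    elif isinstance(item, ResponseFunctionToolCallOutputItem):
        outputs[item.call_id] = item

for call_id, call in calls.items():
    result = outputs.get(call_id)
    print(call.name, call.arguments, "->", result.output if result else "<pending>")
```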
+ """ diff --git a/src/openai/types/responses/response_function_tool_call_param.py b/src/openai/types/responses/response_function_tool_call_param.py index 51b947a764..eaa263cf67 100644 --- a/src/openai/types/responses/response_function_tool_call_param.py +++ b/src/openai/types/responses/response_function_tool_call_param.py @@ -8,9 +8,6 @@ class ResponseFunctionToolCallParam(TypedDict, total=False): - id: Required[str] - """The unique ID of the function tool call.""" - arguments: Required[str] """A JSON string of the arguments to pass to the function.""" @@ -23,6 +20,9 @@ class ResponseFunctionToolCallParam(TypedDict, total=False): type: Required[Literal["function_call"]] """The type of the function tool call. Always `function_call`.""" + id: str + """The unique ID of the function tool call.""" + status: Literal["in_progress", "completed", "incomplete"] """The status of the item. diff --git a/src/openai/types/responses/response_input_item_param.py b/src/openai/types/responses/response_input_item_param.py index 32ac13cabb..2505f7c0b5 100644 --- a/src/openai/types/responses/response_input_item_param.py +++ b/src/openai/types/responses/response_input_item_param.py @@ -13,12 +13,12 @@ from .response_function_web_search_param import ResponseFunctionWebSearchParam from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam from .response_input_message_content_list_param import ResponseInputMessageContentListParam +from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam __all__ = [ "ResponseInputItemParam", "Message", "ComputerCallOutput", - "ComputerCallOutputOutput", "ComputerCallOutputAcknowledgedSafetyCheck", "FunctionCallOutput", "ItemReference", @@ -46,20 +46,6 @@ class Message(TypedDict, total=False): """The type of the message input. Always set to `message`.""" -class ComputerCallOutputOutput(TypedDict, total=False): - type: Required[Literal["computer_screenshot"]] - """Specifies the event type. - - For a computer screenshot, this property is always set to `computer_screenshot`. - """ - - file_id: str - """The identifier of an uploaded file that contains the screenshot.""" - - image_url: str - """The URL of the screenshot image.""" - - class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): id: Required[str] """The ID of the pending safety check.""" @@ -75,7 +61,7 @@ class ComputerCallOutput(TypedDict, total=False): call_id: Required[str] """The ID of the computer tool call that produced the output.""" - output: Required[ComputerCallOutputOutput] + output: Required[ResponseComputerToolCallOutputScreenshotParam] """A computer screenshot image used with the computer use tool.""" type: Required[Literal["computer_call_output"]] diff --git a/src/openai/types/responses/response_input_message_item.py b/src/openai/types/responses/response_input_message_item.py new file mode 100644 index 0000000000..6a788e7fa4 --- /dev/null +++ b/src/openai/types/responses/response_input_message_item.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/src/openai/types/responses/response_input_message_item.py b/src/openai/types/responses/response_input_message_item.py
new file mode 100644
index 0000000000..6a788e7fa4
--- /dev/null
+++ b/src/openai/types/responses/response_input_message_item.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_input_message_content_list import ResponseInputMessageContentList
+
+__all__ = ["ResponseInputMessageItem"]
+
+
+class ResponseInputMessageItem(BaseModel):
+    id: str
+    """The unique ID of the message input."""
+
+    content: ResponseInputMessageContentList
+    """
+    A list of one or many input items to the model, containing different content
+    types.
+    """
+
+    role: Literal["user", "system", "developer"]
+    """The role of the message input. One of `user`, `system`, or `developer`."""
+
+    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+    """The status of the item.
+
+    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+    returned via API.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always set to `message`."""
diff --git a/src/openai/types/responses/response_input_param.py b/src/openai/types/responses/response_input_param.py
index b942f4868a..84a80eb7c2 100644
--- a/src/openai/types/responses/response_input_param.py
+++ b/src/openai/types/responses/response_input_param.py
@@ -13,13 +13,13 @@
 from .response_function_web_search_param import ResponseFunctionWebSearchParam
 from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam
 from .response_input_message_content_list_param import ResponseInputMessageContentListParam
+from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam
 
 __all__ = [
     "ResponseInputParam",
     "ResponseInputItemParam",
     "Message",
     "ComputerCallOutput",
-    "ComputerCallOutputOutput",
     "ComputerCallOutputAcknowledgedSafetyCheck",
     "FunctionCallOutput",
     "ItemReference",
@@ -47,20 +47,6 @@ class Message(TypedDict, total=False):
     """The type of the message input. Always set to `message`."""
 
 
-class ComputerCallOutputOutput(TypedDict, total=False):
-    type: Required[Literal["computer_screenshot"]]
-    """Specifies the event type.
-
-    For a computer screenshot, this property is always set to `computer_screenshot`.
-    """
-
-    file_id: str
-    """The identifier of an uploaded file that contains the screenshot."""
-
-    image_url: str
-    """The URL of the screenshot image."""
-
-
 class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False):
     id: Required[str]
     """The ID of the pending safety check."""
 
@@ -76,7 +62,7 @@ class ComputerCallOutput(TypedDict, total=False):
     call_id: Required[str]
     """The ID of the computer tool call that produced the output."""
 
-    output: Required[ComputerCallOutputOutput]
+    output: Required[ResponseComputerToolCallOutputScreenshotParam]
     """A computer screenshot image used with the computer use tool."""
 
     type: Required[Literal["computer_call_output"]]
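The shared screenshot param plugs straight into a `computer_call_output` input item. A sketch of the item's shape only; a real computer-use request also needs the computer tool configured, and the model name, IDs, and URL below are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# Continue a computer-use turn by returning the requested screenshot.
response = client.responses.create(
    model="computer-use-preview",     # assumption: the computer-use model name
    previous_response_id="resp_123",  # placeholder
    input=[
        {
            "type": "computer_call_output",
            "call_id": "call_abc",  # placeholder: from the model's computer call
            "output": {
                "type": "computer_screenshot",
                "image_url": "https://example.com/screen.png",  # placeholder
            },
        }
    ],
)
```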
diff --git a/src/openai/types/responses/response_item.py b/src/openai/types/responses/response_item.py
new file mode 100644
index 0000000000..dc8d67d0f2
--- /dev/null
+++ b/src/openai/types/responses/response_item.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .response_output_message import ResponseOutputMessage
+from .response_computer_tool_call import ResponseComputerToolCall
+from .response_input_message_item import ResponseInputMessageItem
+from .response_function_web_search import ResponseFunctionWebSearch
+from .response_file_search_tool_call import ResponseFileSearchToolCall
+from .response_function_tool_call_item import ResponseFunctionToolCallItem
+from .response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem
+from .response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem
+
+__all__ = ["ResponseItem"]
+
+ResponseItem: TypeAlias = Annotated[
+    Union[
+        ResponseInputMessageItem,
+        ResponseOutputMessage,
+        ResponseFileSearchToolCall,
+        ResponseComputerToolCall,
+        ResponseComputerToolCallOutputItem,
+        ResponseFunctionWebSearch,
+        ResponseFunctionToolCallItem,
+        ResponseFunctionToolCallOutputItem,
+    ],
+    PropertyInfo(discriminator="type"),
+]
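Because the union is `Annotated` with `PropertyInfo(discriminator="type")`, the client resolves each wire item to its concrete class from the `type` field, so `isinstance` checks narrow cleanly. A sketch:

```python
from openai.types.responses import (
    ResponseInputMessageItem,
    ResponseItem,
    ResponseOutputMessage,
)

def describe(item: ResponseItem) -> str:
    # Pydantic already picked the concrete class via the `type` discriminator.
    if isinstance(item, ResponseInputMessageItem):
        return f"input message from {item.role}"
    if isinstance(item, ResponseOutputMessage):
        return f"output message ({item.status})"
    return f"other item: {item.type}"
```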
- """ - - file_id: Optional[str] = None - """The identifier of an uploaded file that contains the screenshot.""" - - image_url: Optional[str] = None - """The URL of the screenshot image.""" - - -class DataComputerCallOutputAcknowledgedSafetyCheck(BaseModel): - id: str - """The ID of the pending safety check.""" - - code: str - """The type of the pending safety check.""" - - message: str - """Details about the pending safety check.""" - - -class DataComputerCallOutput(BaseModel): - id: str - """The unique ID of the computer call tool output.""" - - call_id: str - """The ID of the computer tool call that produced the output.""" - - output: DataComputerCallOutputOutput - """A computer screenshot image used with the computer use tool.""" - - type: Literal["computer_call_output"] - """The type of the computer tool call output. Always `computer_call_output`.""" - - acknowledged_safety_checks: Optional[List[DataComputerCallOutputAcknowledgedSafetyCheck]] = None - """ - The safety checks reported by the API that have been acknowledged by the - developer. - """ - - status: Optional[Literal["in_progress", "completed", "incomplete"]] = None - """The status of the message input. - - One of `in_progress`, `completed`, or `incomplete`. Populated when input items - are returned via API. - """ - - -class DataFunctionCallOutput(BaseModel): - id: str - """The unique ID of the function call tool output.""" - - call_id: str - """The unique ID of the function tool call generated by the model.""" - - output: str - """A JSON string of the output of the function tool call.""" - - type: Literal["function_call_output"] - """The type of the function tool call output. Always `function_call_output`.""" - - status: Optional[Literal["in_progress", "completed", "incomplete"]] = None - """The status of the item. - - One of `in_progress`, `completed`, or `incomplete`. Populated when items are - returned via API. - """ - - -Data: TypeAlias = Annotated[ - Union[ - DataMessage, - ResponseOutputMessage, - ResponseFileSearchToolCall, - ResponseComputerToolCall, - DataComputerCallOutput, - ResponseFunctionWebSearch, - ResponseFunctionToolCall, - DataFunctionCallOutput, - ], - PropertyInfo(discriminator="type"), -] +__all__ = ["ResponseItemList"] class ResponseItemList(BaseModel): - data: List[Data] + data: List[ResponseItem] """A list of items used to generate this response.""" first_id: str diff --git a/src/openai/types/responses/response_usage.py b/src/openai/types/responses/response_usage.py index ef631c5882..9ad36bd326 100644 --- a/src/openai/types/responses/response_usage.py +++ b/src/openai/types/responses/response_usage.py @@ -3,7 +3,15 @@ from ..._models import BaseModel -__all__ = ["ResponseUsage", "OutputTokensDetails"] +__all__ = ["ResponseUsage", "InputTokensDetails", "OutputTokensDetails"] + + +class InputTokensDetails(BaseModel): + cached_tokens: int + """The number of tokens that were retrieved from the cache. + + [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). 
+ """ class OutputTokensDetails(BaseModel): @@ -15,6 +23,9 @@ class ResponseUsage(BaseModel): input_tokens: int """The number of input tokens.""" + input_tokens_details: InputTokensDetails + """A detailed breakdown of the input tokens.""" + output_tokens: int """The number of output tokens.""" diff --git a/src/openai/types/shared/reasoning.py b/src/openai/types/shared/reasoning.py index 50821a1727..78a396d738 100644 --- a/src/openai/types/shared/reasoning.py +++ b/src/openai/types/shared/reasoning.py @@ -20,7 +20,7 @@ class Reasoning(BaseModel): """ generate_summary: Optional[Literal["concise", "detailed"]] = None - """**o-series models only** + """**computer_use_preview only** A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. One of `concise` or diff --git a/src/openai/types/shared_params/reasoning.py b/src/openai/types/shared_params/reasoning.py index f2b5c5963a..2953b895c4 100644 --- a/src/openai/types/shared_params/reasoning.py +++ b/src/openai/types/shared_params/reasoning.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, TypedDict from ..shared.reasoning_effort import ReasoningEffort @@ -11,7 +11,7 @@ class Reasoning(TypedDict, total=False): - effort: Required[Optional[ReasoningEffort]] + effort: Optional[ReasoningEffort] """**o-series models only** Constrains effort on reasoning for @@ -21,7 +21,7 @@ class Reasoning(TypedDict, total=False): """ generate_summary: Optional[Literal["concise", "detailed"]] - """**o-series models only** + """**computer_use_preview only** A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. 
diff --git a/tests/api_resources/responses/test_input_items.py b/tests/api_resources/responses/test_input_items.py
index 28c5e8ca1f..77a156b5ac 100644
--- a/tests/api_resources/responses/test_input_items.py
+++ b/tests/api_resources/responses/test_input_items.py
@@ -10,7 +10,7 @@
 from openai import OpenAI, AsyncOpenAI
 from tests.utils import assert_matches_type
 from openai.pagination import SyncCursorPage, AsyncCursorPage
-from openai.types.responses.response_item_list import Data
+from openai.types.responses import ResponseItem
 
 base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
 
@@ -23,7 +23,7 @@ def test_method_list(self, client: OpenAI) -> None:
         input_item = client.responses.input_items.list(
             response_id="response_id",
         )
-        assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     def test_method_list_with_all_params(self, client: OpenAI) -> None:
@@ -34,7 +34,7 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None:
             limit=0,
             order="asc",
         )
-        assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     def test_raw_response_list(self, client: OpenAI) -> None:
@@ -45,7 +45,7 @@ def test_raw_response_list(self, client: OpenAI) -> None:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         input_item = response.parse()
-        assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     def test_streaming_response_list(self, client: OpenAI) -> None:
@@ -56,7 +56,7 @@ def test_streaming_response_list(self, client: OpenAI) -> None:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             input_item = response.parse()
-            assert_matches_type(SyncCursorPage[Data], input_item, path=["response"])
+            assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"])
 
         assert cast(Any, response.is_closed) is True
 
@@ -76,7 +76,7 @@ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
         input_item = await async_client.responses.input_items.list(
             response_id="response_id",
         )
-        assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(AsyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
@@ -87,7 +87,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
             limit=0,
             order="asc",
         )
-        assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(AsyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
@@ -98,7 +98,7 @@ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
         input_item = response.parse()
-        assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+        assert_matches_type(AsyncCursorPage[ResponseItem], input_item, path=["response"])
 
     @parametrize
     async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
@@ -109,7 +109,7 @@ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
 
             input_item = await response.parse()
-            assert_matches_type(AsyncCursorPage[Data], input_item, path=["response"])
+            assert_matches_type(AsyncCursorPage[ResponseItem], input_item, path=["response"])
 
         assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py
index 6f9b598e61..a2f8fb48a3 100644
--- a/tests/api_resources/test_batches.py
+++ b/tests/api_resources/test_batches.py
@@ -22,7 +22,7 @@ class TestBatches:
     def test_method_create(self, client: OpenAI) -> None:
         batch = client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
         assert_matches_type(Batch, batch, path=["response"])
 
@@ -31,7 +31,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
         batch = client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
             metadata={"foo": "string"},
         )
 
@@ -41,7 +41,7 @@ def test_raw_response_create(self, client: OpenAI) -> None:
         response = client.batches.with_raw_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
 
@@ -54,7 +54,7 @@ def test_streaming_response_create(self, client: OpenAI) -> None:
         with client.batches.with_streaming_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         ) as response:
             assert not response.is_closed
 
@@ -182,7 +182,7 @@ class TestAsyncBatches:
     async def test_method_create(self, async_client: AsyncOpenAI) -> None:
         batch = await async_client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
         assert_matches_type(Batch, batch, path=["response"])
 
@@ -191,7 +191,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
         batch = await async_client.batches.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
             metadata={"foo": "string"},
         )
 
@@ -201,7 +201,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.batches.with_raw_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         )
 
@@ -214,7 +214,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
         async with async_client.batches.with_streaming_response.create(
             completion_window="24h",
-            endpoint="/v1/chat/completions",
+            endpoint="/v1/responses",
             input_file_id="string",
         ) as response:
             assert not response.is_closed