diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 68804e4da0..285741ee32 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.61.0"
+  ".": "1.61.1"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index e49b5c56e8..df7877dfd0 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 69
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-6204952a29973265b9c0d66fc67ffaf53c6a90ae4d75cdacf9d147676f5274c9.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fc5dbc19505b0035f9e7f88868619f4fb519b048bde011f6154f3132d4be71fb.yml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dcd1c06333..101e7480b7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,20 @@
 # Changelog
 
+## 1.61.1 (2025-02-05)
+
+Full Changelog: [v1.61.0...v1.61.1](https://github.com/openai/openai-python/compare/v1.61.0...v1.61.1)
+
+### Bug Fixes
+
+* **api/types:** correct audio duration & role types ([#2091](https://github.com/openai/openai-python/issues/2091)) ([afcea48](https://github.com/openai/openai-python/commit/afcea4891ff85de165ccc2b5497ccf9a90520e9e))
+* **cli/chat:** only send params when set ([#2077](https://github.com/openai/openai-python/issues/2077)) ([688b223](https://github.com/openai/openai-python/commit/688b223d9a733d241d50e5d7df62f346592c537c))
+
+
+### Chores
+
+* **internal:** bump ruff dependency ([#2080](https://github.com/openai/openai-python/issues/2080)) ([b7a80b1](https://github.com/openai/openai-python/commit/b7a80b1994ab86e81485b88531e4aea63b3da594))
+* **internal:** change default timeout to an int ([#2079](https://github.com/openai/openai-python/issues/2079)) ([d3df1c6](https://github.com/openai/openai-python/commit/d3df1c6ca090598701e38fd376a9796aadba88f1))
+
 ## 1.61.0 (2025-01-31)
 
 Full Changelog: [v1.60.2...v1.61.0](https://github.com/openai/openai-python/compare/v1.60.2...v1.61.0)
diff --git a/api.md b/api.md
index c1262fd2c5..efbfeaa68f 100644
--- a/api.md
+++ b/api.md
@@ -255,6 +255,7 @@ from openai.types.beta.realtime import (
     ConversationItemInputAudioTranscriptionFailedEvent,
     ConversationItemTruncateEvent,
     ConversationItemTruncatedEvent,
+    ConversationItemWithReference,
     ErrorEvent,
     InputAudioBufferAppendEvent,
     InputAudioBufferClearEvent,
diff --git a/pyproject.toml b/pyproject.toml
index 07913fcbd2..6f1a6eb28a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.61.0"
+version = "1.61.1"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
@@ -194,7 +194,7 @@ select = [
   "T201",
   "T203",
   # misuse of typing.TYPE_CHECKING
-  "TCH004",
+  "TC004",
   # import rules
   "TID251",
 ]
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 38cc6e1cf2..5599057b66 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -140,7 +140,7 @@ requests==2.31.0
 respx==0.22.0
 rich==13.7.1
     # via inline-snapshot
-ruff==0.6.9
+ruff==0.9.4
 setuptools==68.2.2
     # via nodeenv
 six==1.16.0
diff --git a/scripts/utils/ruffen-docs.py b/scripts/utils/ruffen-docs.py
index 37b3d94f0f..0cf2bd2fd9 100644
--- a/scripts/utils/ruffen-docs.py
+++ b/scripts/utils/ruffen-docs.py
@@ -47,7 +47,7 @@ def _md_match(match: Match[str]) -> str:
         with _collect_error(match):
             code = format_code_block(code)
         code = textwrap.indent(code, match["indent"])
-        return f'{match["before"]}{code}{match["after"]}'
+        return f"{match['before']}{code}{match['after']}"
 
     def _pycon_match(match: Match[str]) -> str:
         code = ""
@@ -97,7 +97,7 @@ def finish_fragment() -> None:
     def _md_pycon_match(match: Match[str]) -> str:
         code = _pycon_match(match)
         code = textwrap.indent(code, match["indent"])
-        return f'{match["before"]}{code}{match["after"]}'
+        return f"{match['before']}{code}{match['after']}"
 
     src = MD_RE.sub(_md_match, src)
     src = MD_PYCON_RE.sub(_md_pycon_match, src)
diff --git a/src/openai/_constants.py b/src/openai/_constants.py
index 3f82bed037..7029dc72b0 100644
--- a/src/openai/_constants.py
+++ b/src/openai/_constants.py
@@ -6,7 +6,7 @@
 OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to"
 
 # default timeout is 10 minutes
-DEFAULT_TIMEOUT = httpx.Timeout(timeout=600.0, connect=5.0)
+DEFAULT_TIMEOUT = httpx.Timeout(timeout=600, connect=5.0)
 DEFAULT_MAX_RETRIES = 2
 DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=1000, max_keepalive_connections=100)
 
diff --git a/src/openai/_models.py b/src/openai/_models.py
index 23456d9f80..c6e1305087 100644
--- a/src/openai/_models.py
+++ b/src/openai/_models.py
@@ -197,7 +197,7 @@ def to_json(
     @override
     def __str__(self) -> str:
         # mypy complains about an invalid self arg
-        return f'{self.__repr_name__()}({self.__repr_str__(", ")})'  # type: ignore[misc]
+        return f"{self.__repr_name__()}({self.__repr_str__(', ')})"  # type: ignore[misc]
 
     # Override the 'construct' method in a way that supports recursive parsing without validation.
     # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836.
diff --git a/src/openai/_version.py b/src/openai/_version.py
index e9ab8be65e..7ffe16b95d 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.61.0"  # x-release-please-version
+__version__ = "1.61.1"  # x-release-please-version
diff --git a/src/openai/cli/_api/chat/completions.py b/src/openai/cli/_api/chat/completions.py
index c299741fe0..feedb5ccab 100644
--- a/src/openai/cli/_api/chat/completions.py
+++ b/src/openai/cli/_api/chat/completions.py
@@ -100,13 +100,17 @@ def create(args: CLIChatCompletionCreateArgs) -> None:
             "messages": [
                 {"role": cast(Literal["user"], message.role), "content": message.content} for message in args.message
             ],
-            "n": args.n,
-            "temperature": args.temperature,
-            "top_p": args.top_p,
-            "stop": args.stop,
             # type checkers are not good at inferring union types so we have to set stream afterwards
             "stream": False,
         }
+        if args.temperature is not None:
+            params['temperature'] = args.temperature
+        if args.stop is not None:
+            params['stop'] = args.stop
+        if args.top_p is not None:
+            params['top_p'] = args.top_p
+        if args.n is not None:
+            params['n'] = args.n
         if args.stream:
             params["stream"] = args.stream  # type: ignore
         if args.max_tokens is not None:
diff --git a/src/openai/types/audio/transcription_verbose.py b/src/openai/types/audio/transcription_verbose.py
index 3b18fa4871..2a670189e0 100644
--- a/src/openai/types/audio/transcription_verbose.py
+++ b/src/openai/types/audio/transcription_verbose.py
@@ -10,7 +10,7 @@
 
 
 class TranscriptionVerbose(BaseModel):
-    duration: str
+    duration: float
     """The duration of the input audio."""
 
     language: str
diff --git a/src/openai/types/audio/translation_verbose.py b/src/openai/types/audio/translation_verbose.py
index 5901ae7535..27cb02d64f 100644
--- a/src/openai/types/audio/translation_verbose.py
+++ b/src/openai/types/audio/translation_verbose.py
@@ -9,7 +9,7 @@
 
 
 class TranslationVerbose(BaseModel):
-    duration: str
+    duration: float
     """The duration of the input audio."""
 
     language: str
diff --git a/src/openai/types/beta/realtime/__init__.py b/src/openai/types/beta/realtime/__init__.py
index 372d4ec19d..cd0616dcfa 100644
--- a/src/openai/types/beta/realtime/__init__.py
+++ b/src/openai/types/beta/realtime/__init__.py
@@ -42,6 +42,7 @@
 from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent as InputAudioBufferCommitEvent
 from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent
 from .conversation_item_truncate_event import ConversationItemTruncateEvent as ConversationItemTruncateEvent
+from .conversation_item_with_reference import ConversationItemWithReference as ConversationItemWithReference
 from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent as InputAudioBufferClearedEvent
 from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent
 from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent
@@ -60,6 +61,9 @@
 from .conversation_item_truncate_event_param import (
     ConversationItemTruncateEventParam as ConversationItemTruncateEventParam,
 )
+from .conversation_item_with_reference_param import (
+    ConversationItemWithReferenceParam as ConversationItemWithReferenceParam,
+)
 from .input_audio_buffer_speech_started_event import (
     InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent,
 )
diff --git a/src/openai/types/beta/realtime/conversation_item_with_reference.py b/src/openai/types/beta/realtime/conversation_item_with_reference.py
new file mode 100644
index 0000000000..31806afc33
--- /dev/null
+++ b/src/openai/types/beta/realtime/conversation_item_with_reference.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+from .conversation_item_content import ConversationItemContent
+
+__all__ = ["ConversationItemWithReference"]
+
+
+class ConversationItemWithReference(BaseModel):
+    id: Optional[str] = None
+    """
+    For an item of type (`message` | `function_call` | `function_call_output`) this
+    field allows the client to assign the unique ID of the item. It is not required
+    because the server will generate one if not provided.
+
+    For an item of type `item_reference`, this field is required and is a reference
+    to any item that has previously existed in the conversation.
+    """
+
+    arguments: Optional[str] = None
+    """The arguments of the function call (for `function_call` items)."""
+
+    call_id: Optional[str] = None
+    """
+    The ID of the function call (for `function_call` and `function_call_output`
+    items). If passed on a `function_call_output` item, the server will check that a
+    `function_call` item with the same ID exists in the conversation history.
+    """
+
+    content: Optional[List[ConversationItemContent]] = None
+    """The content of the message, applicable for `message` items.
+
+    - Message items of role `system` support only `input_text` content
+    - Message items of role `user` support `input_text` and `input_audio` content
+    - Message items of role `assistant` support `text` content.
+    """
+
+    name: Optional[str] = None
+    """The name of the function being called (for `function_call` items)."""
+
+    object: Optional[Literal["realtime.item"]] = None
+    """Identifier for the API object being returned - always `realtime.item`."""
+
+    output: Optional[str] = None
+    """The output of the function call (for `function_call_output` items)."""
+
+    role: Optional[Literal["user", "assistant", "system"]] = None
+    """
+    The role of the message sender (`user`, `assistant`, `system`), only applicable
+    for `message` items.
+    """
+
+    status: Optional[Literal["completed", "incomplete"]] = None
+    """The status of the item (`completed`, `incomplete`).
+
+    These have no effect on the conversation, but are accepted for consistency with
+    the `conversation.item.created` event.
+    """
+
+    type: Optional[Literal["message", "function_call", "function_call_output", "item_reference"]] = None
+    """
+    The type of the item (`message`, `function_call`, `function_call_output`,
+    `item_reference`).
+    """
diff --git a/src/openai/types/beta/realtime/conversation_item_with_reference_param.py b/src/openai/types/beta/realtime/conversation_item_with_reference_param.py
new file mode 100644
index 0000000000..e266cdce32
--- /dev/null
+++ b/src/openai/types/beta/realtime/conversation_item_with_reference_param.py
@@ -0,0 +1,68 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import Literal, TypedDict
+
+from .conversation_item_content_param import ConversationItemContentParam
+
+__all__ = ["ConversationItemWithReferenceParam"]
+
+
+class ConversationItemWithReferenceParam(TypedDict, total=False):
+    id: str
+    """
+    For an item of type (`message` | `function_call` | `function_call_output`) this
+    field allows the client to assign the unique ID of the item. It is not required
+    because the server will generate one if not provided.
+
+    For an item of type `item_reference`, this field is required and is a reference
+    to any item that has previously existed in the conversation.
+    """
+
+    arguments: str
+    """The arguments of the function call (for `function_call` items)."""
+
+    call_id: str
+    """
+    The ID of the function call (for `function_call` and `function_call_output`
+    items). If passed on a `function_call_output` item, the server will check that a
+    `function_call` item with the same ID exists in the conversation history.
+    """
+
+    content: Iterable[ConversationItemContentParam]
+    """The content of the message, applicable for `message` items.
+
+    - Message items of role `system` support only `input_text` content
+    - Message items of role `user` support `input_text` and `input_audio` content
+    - Message items of role `assistant` support `text` content.
+    """
+
+    name: str
+    """The name of the function being called (for `function_call` items)."""
+
+    object: Literal["realtime.item"]
+    """Identifier for the API object being returned - always `realtime.item`."""
+
+    output: str
+    """The output of the function call (for `function_call_output` items)."""
+
+    role: Literal["user", "assistant", "system"]
+    """
+    The role of the message sender (`user`, `assistant`, `system`), only applicable
+    for `message` items.
+    """
+
+    status: Literal["completed", "incomplete"]
+    """The status of the item (`completed`, `incomplete`).
+
+    These have no effect on the conversation, but are accepted for consistency with
+    the `conversation.item.created` event.
+    """
+
+    type: Literal["message", "function_call", "function_call_output", "item_reference"]
+    """
+    The type of the item (`message`, `function_call`, `function_call_output`,
+    `item_reference`).
+    """
diff --git a/src/openai/types/beta/realtime/response_create_event.py b/src/openai/types/beta/realtime/response_create_event.py
index 0801654bd8..d6c5fda926 100644
--- a/src/openai/types/beta/realtime/response_create_event.py
+++ b/src/openai/types/beta/realtime/response_create_event.py
@@ -5,7 +5,7 @@
 
 from ...._models import BaseModel
 from ...shared.metadata import Metadata
-from .conversation_item import ConversationItem
+from .conversation_item_with_reference import ConversationItemWithReference
 
 __all__ = ["ResponseCreateEvent", "Response", "ResponseTool"]
 
@@ -37,11 +37,13 @@ class Response(BaseModel):
     will not add items to default conversation.
     """
 
-    input: Optional[List[ConversationItem]] = None
+    input: Optional[List[ConversationItemWithReference]] = None
     """Input items to include in the prompt for the model.
 
-    Creates a new context for this response, without including the default
-    conversation. Can include references to items from the default conversation.
+    Using this field creates a new context for this Response instead of using the
+    default conversation. An empty array `[]` will clear the context for this
+    Response. Note that this can include references to items from the default
+    conversation.
     """
 
     instructions: Optional[str] = None
diff --git a/src/openai/types/beta/realtime/response_create_event_param.py b/src/openai/types/beta/realtime/response_create_event_param.py
index a87ef955e8..c02fe1b34e 100644
--- a/src/openai/types/beta/realtime/response_create_event_param.py
+++ b/src/openai/types/beta/realtime/response_create_event_param.py
@@ -5,8 +5,8 @@
 from typing import List, Union, Iterable, Optional
 from typing_extensions import Literal, Required, TypedDict
 
-from .conversation_item_param import ConversationItemParam
 from ...shared_params.metadata import Metadata
+from .conversation_item_with_reference_param import ConversationItemWithReferenceParam
 
 __all__ = ["ResponseCreateEventParam", "Response", "ResponseTool"]
 
@@ -38,11 +38,13 @@ class Response(TypedDict, total=False):
     will not add items to default conversation.
     """
 
-    input: Iterable[ConversationItemParam]
+    input: Iterable[ConversationItemWithReferenceParam]
     """Input items to include in the prompt for the model.
 
-    Creates a new context for this response, without including the default
-    conversation. Can include references to items from the default conversation.
+    Using this field creates a new context for this Response instead of using the
+    default conversation. An empty array `[]` will clear the context for this
+    Response. Note that this can include references to items from the default
+    conversation.
     """
 
     instructions: str
diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py
index 7b0ae2e121..dede513f1e 100644
--- a/src/openai/types/chat/chat_completion_chunk.py
+++ b/src/openai/types/chat/chat_completion_chunk.py
@@ -70,7 +70,7 @@ class ChoiceDelta(BaseModel):
     refusal: Optional[str] = None
     """The refusal message generated by the model."""
 
-    role: Optional[Literal["system", "user", "assistant", "tool"]] = None
+    role: Optional[Literal["developer", "system", "user", "assistant", "tool"]] = None
     """The role of the author of this message."""
 
     tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
diff --git a/src/openai/types/chat/chat_completion_role.py b/src/openai/types/chat/chat_completion_role.py
index c2ebef74c8..3ec5e9ad87 100644
--- a/src/openai/types/chat/chat_completion_role.py
+++ b/src/openai/types/chat/chat_completion_role.py
@@ -4,4 +4,4 @@
 
 __all__ = ["ChatCompletionRole"]
 
-ChatCompletionRole: TypeAlias = Literal["system", "user", "assistant", "tool", "function"]
+ChatCompletionRole: TypeAlias = Literal["developer", "system", "user", "assistant", "tool", "function"]