
Commit afd066d

Merge branch 'main' into caching
2 parents 6fafe6b + 0dca4ce commit afd066d

26 files changed (+653, -357 lines)

google/generativeai/answer.py

Lines changed: 9 additions & 8 deletions

@@ -92,7 +92,7 @@ def _make_grounding_passages(source: GroundingPassagesOptions) -> glm.GroundingP
 
     if not isinstance(source, Iterable):
         raise TypeError(
-            f"The 'source' argument must be an instance of 'GroundingPassagesOptions', but got a '{type(source).__name__}' object instead."
+            f"Invalid input: The 'source' argument must be an instance of 'GroundingPassagesOptions'. Received a '{type(source).__name__}' object instead."
         )
 
     passages = []
@@ -156,9 +156,9 @@ def _make_semantic_retriever_config(
         source["source"] = _maybe_get_source_name(source["source"])
     else:
         raise TypeError(
-            "Could create a `glm.SemanticRetrieverConfig` from:\n"
-            f" type: {type(source)}\n"
-            f" value: {source}"
+            f"Invalid input: Failed to create a 'glm.SemanticRetrieverConfig' from the provided source. "
+            f"Received type: {type(source).__name__}, "
+            f"Received value: {source}"
         )
 
     if source["query"] is None:
@@ -208,15 +208,17 @@ def _make_generate_answer_request(
 
     if inline_passages is not None and semantic_retriever is not None:
         raise ValueError(
-            "Either `inline_passages` or `semantic_retriever_config` must be set, not both."
+            f"Invalid configuration: Please set either 'inline_passages' or 'semantic_retriever_config', but not both. "
+            f"Received for inline_passages: {inline_passages}, and for semantic_retriever: {semantic_retriever}."
         )
     elif inline_passages is not None:
        inline_passages = _make_grounding_passages(inline_passages)
     elif semantic_retriever is not None:
        semantic_retriever = _make_semantic_retriever_config(semantic_retriever, contents[-1])
     else:
         raise TypeError(
-            f"The source must be either an `inline_passages` xor `semantic_retriever_config`, but both are `None`"
+            f"Invalid configuration: Either 'inline_passages' or 'semantic_retriever_config' must be provided, but currently both are 'None'. "
+            f"Received for inline_passages: {inline_passages}, and for semantic_retriever: {semantic_retriever}."
         )
 
     if answer_style:
@@ -245,8 +247,7 @@ def generate_answer(
     client: glm.GenerativeServiceClient | None = None,
     request_options: helper_types.RequestOptionsType | None = None,
 ):
-    """
-    Calls the GenerateAnswer API and returns a `types.Answer` containing the response.
+    """Calls the GenerateAnswer API and returns a `types.Answer` containing the response.
 
     You can pass a literal list of text chunks:
 
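For context, a minimal sketch of how the reworded errors surface to a caller. It assumes the public `generate_answer` wrapper forwards `inline_passages` and `semantic_retriever` to `_make_generate_answer_request`; the model and corpus names are placeholders:

import google.generativeai as genai

# Hypothetical call for illustration: supplying both grounding sources
# should trigger the new, more descriptive ValueError; supplying neither
# triggers the reworded TypeError.
try:
    genai.generate_answer(
        model="models/aqa",  # placeholder model name
        contents=["What is the tallest mountain?"],
        inline_passages=["Everest is 8,849 m tall."],
        semantic_retriever="corpora/my-corpus",  # placeholder corpus name
    )
except ValueError as err:
    print(err)  # "Invalid configuration: Please set either 'inline_passages' or ..."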

google/generativeai/client.py

Lines changed: 10 additions & 4 deletions

@@ -52,7 +52,9 @@ def __init__(self, *args, **kwargs):
     def _setup_discovery_api(self):
         api_key = self._client_options.api_key
         if api_key is None:
-            raise ValueError("Uploading to the File API requires an API key.")
+            raise ValueError(
+                "Invalid operation: Uploading to the File API requires an API key. Please provide a valid API key."
+            )
 
         request = googleapiclient.http.HttpRequest(
             http=httplib2.Http(),
@@ -95,7 +97,9 @@ def create_file(
 
 class FileServiceAsyncClient(glm.FileServiceAsyncClient):
     async def create_file(self, *args, **kwargs):
-        raise NotImplementedError("`create_file` is not yet implemented for the async client.")
+        raise NotImplementedError(
+            "The `create_file` method is currently not supported for the asynchronous client."
+        )
 
 
 @dataclasses.dataclass
@@ -123,7 +127,7 @@ def configure(
     client_info: gapic_v1.client_info.ClientInfo | None = None,
     default_metadata: Sequence[tuple[str, str]] = (),
 ) -> None:
-    """Captures default client configuration.
+    """Initializes default client configurations using specified parameters or environment variables.
 
     If no API key has been provided (either directly, or on `client_options`) and the
     `GOOGLE_API_KEY` environment variable is set, it will be used as the API key.
@@ -149,7 +153,9 @@ def configure(
 
     if had_api_key_value:
         if api_key is not None:
-            raise ValueError("You can't set both `api_key` and `client_options['api_key']`.")
+            raise ValueError(
+                "Invalid configuration: Please set either `api_key` or `client_options['api_key']`, but not both."
+            )
     else:
         if api_key is None:
             # If no key is provided explicitly, attempt to load one from the
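As a quick illustration of the `configure` change (a sketch; the key strings are placeholders):

import google.generativeai as genai

# Passing the key twice now raises the clearer ValueError shown above.
try:
    genai.configure(
        api_key="AIza-placeholder",
        client_options={"api_key": "AIza-placeholder"},  # conflicts with api_key
    )
except ValueError as err:
    print(err)  # "Invalid configuration: Please set either `api_key` or ..."

# The supported path: provide the key once (or set the GOOGLE_API_KEY
# environment variable and pass no key at all).
genai.configure(api_key="AIza-placeholder")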

google/generativeai/discuss.py

Lines changed: 21 additions & 14 deletions

@@ -69,15 +69,19 @@ def _make_messages(
     elif len(even_authors) == 1:
         even_author = even_authors.pop()
     else:
-        raise discuss_types.AuthorError("Authors are not strictly alternating")
+        raise discuss_types.AuthorError(
+            "Invalid sequence: Authors in the discussion must alternate strictly."
+        )
 
     odd_authors = set(msg.author for msg in messages[1::2] if msg.author)
     if not odd_authors:
         odd_author = "1"
     elif len(odd_authors) == 1:
         odd_author = odd_authors.pop()
     else:
-        raise discuss_types.AuthorError("Authors are not strictly alternating")
+        raise discuss_types.AuthorError(
+            "Invalid sequence: Authors in the discussion must alternate strictly."
+        )
 
     if all(msg.author for msg in messages):
         return messages
@@ -130,8 +134,8 @@ def _make_examples_from_flat(
         raise ValueError(
             textwrap.dedent(
                 f"""\
-                You must pass `Primer` objects, pairs of messages, or an *even* number of messages, got:
-                {len(examples)} messages"""
+                Invalid input: You must pass either `Primer` objects, pairs of messages, or an even number of messages.
+                Currently, {len(examples)} messages were provided, which is an odd number."""
             )
         )
     result = []
@@ -186,7 +190,7 @@ def _make_examples(
         else:
             if not ("input" in first and "output" in first):
                 raise TypeError(
-                    "To create an `Example` from a dict you must supply both `input` and an `output` keys"
+                    "Invalid dictionary format: To create an `Example` instance, the dictionary must contain both `input` and `output` keys."
                 )
     else:
         if isinstance(first, discuss_types.MESSAGE_OPTIONS):
@@ -232,8 +236,7 @@ def _make_message_prompt_dict(
     flat_prompt = (context is not None) or (examples is not None) or (messages is not None)
     if flat_prompt:
         raise ValueError(
-            "You can't set `prompt`, and its fields `(context, examples, messages)`"
-            " at the same time"
+            "Invalid configuration: Either `prompt` or its fields `(context, examples, messages)` should be set, but not both simultaneously."
         )
     if isinstance(prompt, glm.MessagePrompt):
         return prompt
@@ -245,7 +248,7 @@ def _make_message_prompt_dict(
     keys = set(prompt.keys())
     if not keys.issubset(discuss_types.MESSAGE_PROMPT_KEYS):
         raise KeyError(
-            f"Found extra entries in the prompt dictionary: {keys - discuss_types.MESSAGE_PROMPT_KEYS}"
+            f"Invalid prompt dictionary: Extra entries found that are not recognized: {keys - discuss_types.MESSAGE_PROMPT_KEYS}. Please check the keys."
         )
 
     examples = prompt.get("examples", None)
@@ -319,7 +322,7 @@ def chat(
     client: glm.DiscussServiceClient | None = None,
     request_options: helper_types.RequestOptionsType | None = None,
 ) -> discuss_types.ChatResponse:
-    """Calls the API and returns a `types.ChatResponse` containing the response.
+    """Calls the API to initiate a chat with a model using provided parameters
 
     Args:
         model: Which model to call, as a string or a `types.Model`.
@@ -419,6 +422,7 @@ async def chat_async(
     client: glm.DiscussServiceAsyncClient | None = None,
     request_options: helper_types.RequestOptionsType | None = None,
 ) -> discuss_types.ChatResponse:
+    """Calls the API asynchronously to initiate a chat with a model using provided parameters"""
     request = _make_generate_message_request(
         model=model,
         context=context,
@@ -473,12 +477,13 @@ def reply(
         request_options: helper_types.RequestOptionsType | None = None,
     ) -> discuss_types.ChatResponse:
         if isinstance(self._client, glm.DiscussServiceAsyncClient):
-            raise TypeError(f"reply can't be called on an async client, use reply_async instead.")
+            raise TypeError(
+                "Invalid operation: The 'reply' method cannot be called on an asynchronous client. Please use the 'reply_async' method instead."
+            )
         if self.last is None:
             raise ValueError(
-                "The last response from the model did not return any candidates.\n"
-                "Check the `.filters` attribute to see why the responses were filtered:\n"
-                f"{self.filters}"
+                f"Invalid operation: No candidates returned from the model's last response. "
+                f"Please inspect the '.filters' attribute to understand why responses were filtered out. Current filters: {self.filters}"
             )
 
         request = self.to_dict()
@@ -497,7 +502,7 @@ async def reply_async(
     ) -> discuss_types.ChatResponse:
         if isinstance(self._client, glm.DiscussServiceClient):
             raise TypeError(
-                f"reply_async can't be called on a non-async client, use reply instead."
+                "Invalid method call: `reply_async` is not supported on a non-async client. Please use the `reply` method instead."
            )
         request = self.to_dict()
         request.pop("candidates")
@@ -577,6 +582,8 @@ def count_message_tokens(
     client: glm.DiscussServiceAsyncClient | None = None,
     request_options: helper_types.RequestOptionsType | None = None,
 ) -> discuss_types.TokenCount:
+    """Calls the API to calculate the number of tokens used in the prompt."""
+
     model = model_types.make_model_name(model)
     prompt = _make_message_prompt(prompt, context=context, examples=examples, messages=messages)

google/generativeai/embedding.py

Lines changed: 13 additions & 7 deletions

@@ -82,7 +82,9 @@ def to_task_type(x: EmbeddingTaskTypeOptions) -> EmbeddingTaskType:
 
 def _batched(iterable: Iterable[T], n: int) -> Iterable[list[T]]:
     if n < 1:
-        raise ValueError(f"Batch size `n` must be >0, got: {n}")
+        raise ValueError(
+            f"Invalid input: The batch size 'n' must be a positive integer. You entered: {n}. Please enter a number greater than 0."
+        )
     batch = []
     for item in iterable:
         batch.append(item)
@@ -167,11 +169,13 @@ def embed_content(
 
     if title and to_task_type(task_type) is not EmbeddingTaskType.RETRIEVAL_DOCUMENT:
         raise ValueError(
-            "If a title is specified, the task must be a retrieval document type task."
+            f"Invalid task type: When a title is specified, the task must be of a 'retrieval document' type. Received task type: {task_type} and title: {title}."
         )
 
     if output_dimensionality and output_dimensionality < 0:
-        raise ValueError("`output_dimensionality` must be a non-negative integer.")
+        raise ValueError(
+            f"Invalid value: `output_dimensionality` must be a non-negative integer. Received: {output_dimensionality}."
+        )
 
     if task_type:
         task_type = to_task_type(task_type)
@@ -247,7 +251,8 @@ async def embed_content_async(
     client: glm.GenerativeServiceAsyncClient = None,
     request_options: helper_types.RequestOptionsType | None = None,
 ) -> text_types.EmbeddingDict | text_types.BatchEmbeddingDict:
-    """The async version of `genai.embed_content`."""
+    """Calls the API to create async embeddings for content passed in."""
+
     model = model_types.make_model_name(model)
 
     if request_options is None:
@@ -258,11 +263,12 @@ async def embed_content_async(
 
     if title and to_task_type(task_type) is not EmbeddingTaskType.RETRIEVAL_DOCUMENT:
         raise ValueError(
-            "If a title is specified, the task must be a retrieval document type task."
+            f"Invalid task type: When a title is specified, the task must be of a 'retrieval document' type. Received task type: {task_type} and title: {title}."
         )
-
     if output_dimensionality and output_dimensionality < 0:
-        raise ValueError("`output_dimensionality` must be a non-negative integer.")
+        raise ValueError(
+            f"Invalid value: `output_dimensionality` must be a non-negative integer. Received: {output_dimensionality}."
+        )
 
     if task_type:
         task_type = to_task_type(task_type)
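A short sketch of the two `embed_content` validations above; the model name is a placeholder:

import google.generativeai as genai

# A title is only valid for retrieval-document tasks; the error now echoes
# both the task type and the title it received.
try:
    genai.embed_content(
        model="models/embedding-001",  # placeholder model name
        content="The quick brown fox",
        task_type="semantic_similarity",
        title="Fox facts",  # not allowed for this task type
    )
except ValueError as err:
    print(err)

# A negative output_dimensionality now reports the offending value too.
try:
    genai.embed_content(
        model="models/embedding-001",
        content="The quick brown fox",
        output_dimensionality=-1,
    )
except ValueError as err:
    print(err)  # "Invalid value: `output_dimensionality` must be ..."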

google/generativeai/files.py

Lines changed: 4 additions & 1 deletion

@@ -37,7 +37,7 @@ def upload_file(
     display_name: str | None = None,
     resumable: bool = True,
 ) -> file_types.File:
-    """Uploads a file using a supported file service.
+    """Calls the API to upload a file using a supported file service.
 
     Args:
         path: The path to the file to be uploaded.
@@ -73,6 +73,7 @@ def upload_file(
 
 
 def list_files(page_size=100) -> Iterable[file_types.File]:
+    """Calls the API to list files using a supported file service."""
     client = get_default_file_client()
 
     response = client.list_files(glm.ListFilesRequest(page_size=page_size))
@@ -81,11 +82,13 @@ def list_files(page_size=100) -> Iterable[file_types.File]:
 
 
 def get_file(name) -> file_types.File:
+    """Calls the API to retrieve a specified file using a supported file service."""
     client = get_default_file_client()
     return file_types.File(client.get_file(name=name))
 
 
 def delete_file(name):
+    """Calls the API to permanently delete a specified file using a supported file service."""
     if isinstance(name, (file_types.File, glm.File)):
         name = name.name
     request = glm.DeleteFileRequest(name=name)
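The four helpers that gained docstrings compose into a simple round trip; a sketch, with the file path and display name as placeholders:

import google.generativeai as genai

# Upload, list, fetch, and delete a file through the File API helpers.
uploaded = genai.upload_file(path="data/report.pdf", display_name="report")

for f in genai.list_files(page_size=10):
    print(f.name, f.display_name)

fetched = genai.get_file(name=uploaded.name)
genai.delete_file(name=uploaded.name)  # delete_file also accepts a File object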
