
Commit e9b0cde

Remove references to Pro 1.0 (#600)
* remove references to 1.0-pro
  Change-Id: I405c87d495c73550cfbd00a13249cb1e30ab0989
* remove references to gemini-pro
  Change-Id: Ied2f0b7112dd5d61390da3e84457a2fb3f770665
* Update models.py
* format
  Change-Id: Ib3a0c90bfc6ec7f8f793917b3140769e2635a8e9
1 parent c8eadc4 commit e9b0cde

File tree: 9 files changed (+46 -42 lines)

docs/api/google/generativeai/ChatSession.md

+2 -2

@@ -39,7 +39,7 @@ Contains an ongoing conversation with the model.
 <!-- Placeholder for "Used in" -->
 
 ```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> chat = model.start_chat()
 >>> response = chat.send_message("Hello")
 >>> print(response.text)

@@ -136,7 +136,7 @@ Sends the conversation history with the added message and returns the model's re
 Appends the request and response to the conversation history.
 
 ```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> chat = model.start_chat()
 >>> response = chat.send_message("Hello")
 >>> print(response.text)
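
For reference, the updated chat example runs end to end like this; a minimal sketch assuming `google-generativeai` is installed and a valid API key is available:

```python
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder key

# The docs now use gemini-1.5-flash in place of the retired gemini-pro.
model = genai.GenerativeModel("models/gemini-1.5-flash")
chat = model.start_chat()
response = chat.send_message("Hello")
print(response.text)
```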

docs/api/google/generativeai/GenerativeModel.md

+4 -4

@@ -31,7 +31,7 @@ The `genai.GenerativeModel` class wraps default parameters for calls to <a href=
 
 <pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
 <code>google.generativeai.GenerativeModel(
-    model_name: str = &#x27;gemini-pro&#x27;,
+    model_name: str = &#x27;gemini-1.5-flash&#x27;,
     safety_settings: (safety_types.SafetySettingOptions | None) = None,
     generation_config: (generation_types.GenerationConfigType | None) = None,
     tools: (content_types.FunctionLibraryType | None) = None,

@@ -51,7 +51,7 @@ requests. What media-types are supported for input and output is model-dependant
 >>> import google.generativeai as genai
 >>> import PIL.Image
 >>> genai.configure(api_key='YOUR_API_KEY')
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> result = model.generate_content('Tell me a story about a magic backpack')
 >>> result.text
 "In the quaint little town of Lakeside, there lived a young girl named Lily..."

@@ -62,7 +62,7 @@ requests. What media-types are supported for input and output is model-dependant
 
 
 ```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> result = model.generate_content([
 ...     "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
 >>> result.text

@@ -270,7 +270,7 @@ This <a href="../../google/generativeai/GenerativeModel.md#generate_content"><co
 conversations.
 
 ```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> response = model.generate_content('Tell me a story about a magic backpack')
 >>> response.text
 ```
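
Since the constructor's default `model_name` is now `'gemini-1.5-flash'`, callers can omit the argument entirely; a short sketch, assuming a configured API key:

```python
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder key

# No model_name argument: the new default resolves to gemini-1.5-flash.
model = genai.GenerativeModel()
result = model.generate_content("Tell me a story about a magic backpack")
print(result.text)
```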

docs/api/google/generativeai/get_model.md

+1 -1

@@ -38,7 +38,7 @@ Calls the API to fetch a model by name.
 
 ```
 import pprint
-model = genai.get_model('models/gemini-pro')
+model = genai.get_model('models/gemini-1.5-flash')
 pprint.pprint(model)
 ```
 

google/generativeai/generative_models.py

+5 -5

@@ -36,14 +36,14 @@ class GenerativeModel:
     >>> import google.generativeai as genai
     >>> import PIL.Image
     >>> genai.configure(api_key='YOUR_API_KEY')
-    >>> model = genai.GenerativeModel('models/gemini-pro')
+    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
     >>> result = model.generate_content('Tell me a story about a magic backpack')
     >>> result.text
     "In the quaint little town of Lakeside, there lived a young girl named Lily..."
 
     Multimodal input:
 
-    >>> model = genai.GenerativeModel('models/gemini-pro')
+    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
     >>> result = model.generate_content([
     ...     "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
     >>> result.text

@@ -250,7 +250,7 @@ def generate_content(
         This `GenerativeModel.generate_content` method can handle multimodal input, and multi-turn
         conversations.
 
-        >>> model = genai.GenerativeModel('models/gemini-pro')
+        >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
         >>> response = model.generate_content('Tell me a story about a magic backpack')
         >>> response.text
 

@@ -481,7 +481,7 @@ def start_chat(
 class ChatSession:
     """Contains an ongoing conversation with the model.
 
-    >>> model = genai.GenerativeModel('models/gemini-pro')
+    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
    >>> chat = model.start_chat()
     >>> response = chat.send_message("Hello")
     >>> print(response.text)

@@ -524,7 +524,7 @@ def send_message(
 
         Appends the request and response to the conversation history.
 
-        >>> model = genai.GenerativeModel('models/gemini-pro')
+        >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
         >>> chat = model.start_chat()
         >>> response = chat.send_message("Hello")
         >>> print(response.text)

google/generativeai/models.py

+2 -2

@@ -40,7 +40,7 @@ def get_model(
 
     ```
     import pprint
-    model = genai.get_model('models/gemini-pro')
+    model = genai.get_model('models/gemini-1.5-flash')
     pprint.pprint(model)
     ```
 

@@ -112,7 +112,7 @@ def get_tuned_model(
 
     ```
     import pprint
-    model = genai.get_tuned_model('tunedModels/gemini-1.0-pro-001')
+    model = genai.get_tuned_model('tunedModels/gemini-1.5-flash')
     pprint.pprint(model)
     ```
 
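
The two lookup helpers touched here can be exercised together; a sketch assuming a configured API key, where the tuned-model name is hypothetical:

```python
import pprint

import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder key

# Fetch metadata for the base model referenced in the updated docstring.
base = genai.get_model("models/gemini-1.5-flash")
pprint.pprint(base)

# 'tunedModels/my-number-generator' is a hypothetical name; use
# genai.list_tuned_models() to find models that exist in your project.
tuned = genai.get_tuned_model("tunedModels/my-number-generator")
pprint.pprint(tuned)
```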

google/generativeai/notebook/text_model.py

+1 -1

@@ -20,7 +20,7 @@
 from google.generativeai.types import generation_types
 from google.generativeai.notebook.lib import model as model_lib
 
-_DEFAULT_MODEL = "models/gemini-pro"
+_DEFAULT_MODEL = "models/gemini-1.5-flash"
 
 
 class TextModel(model_lib.AbstractModel):

samples/rest/tuned_models.sh

+1 -1

@@ -7,7 +7,7 @@ curl -X POST "https://generativelanguage.googleapis.com/v1beta/tunedModels?key=$
     -d '
       {
         "display_name": "number generator model",
-        "base_model": "models/gemini-1.0-pro-001",
+        "base_model": "models/gemini-1.5-flash-001-tuning",
         "tuning_task": {
           "hyperparameters": {
             "batch_size": 2,

tests/test_generative_models.py

+27 -23

@@ -115,7 +115,7 @@ def setUp(self):
 
     def test_hello(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
 
         self.responses["generate_content"].append(simple_response("world!"))
 

@@ -138,7 +138,7 @@ def test_hello(self):
     )
     def test_image(self, content):
         # Generate text from image
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
 
         cat = "It's a cat"
         self.responses["generate_content"].append(simple_response(cat))

@@ -172,7 +172,7 @@ def test_image(self, content):
     )
     def test_generation_config_overwrite(self, config1, config2):
         # Generation config
-        model = generative_models.GenerativeModel("gemini-pro", generation_config=config1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", generation_config=config1)
 
         self.responses["generate_content"] = [
             simple_response(" world!"),

@@ -218,7 +218,7 @@ def test_generation_config_overwrite(self, config1, config2):
     )
     def test_safety_overwrite(self, safe1, safe2):
         # Safety
-        model = generative_models.GenerativeModel("gemini-pro", safety_settings=safe1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", safety_settings=safe1)
 
         self.responses["generate_content"] = [
             simple_response(" world!"),

@@ -253,7 +253,7 @@ def test_stream_basic(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")

@@ -267,7 +267,7 @@ def test_stream_lookahead(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")

@@ -287,7 +287,7 @@ def test_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)
 
         self.assertEqual(

@@ -322,7 +322,7 @@ def test_stream_prompt_feedback_not_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(

@@ -389,7 +389,7 @@ def add(a: int, b: int) -> int:
 
     def test_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["generate_content"] = [

@@ -423,7 +423,7 @@ def test_chat(self):
     def test_chat_roles(self):
         self.responses["generate_content"] = [simple_response("hello!")]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         response = chat.send_message("hello?")
         history = chat.history

@@ -792,7 +792,7 @@ def test_tool_config(self, tool_config, expected_tool_config):
         )
         self.responses["generate_content"] = [simple_response("echo echo")]
 
-        model = generative_models.GenerativeModel("gemini-pro", tools=tools)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
         _ = model.generate_content("Hello", tools=[tools], tool_config=tool_config)
 
         req = self.observed_requests[0]

@@ -811,7 +811,9 @@ def test_tool_config(self, tool_config, expected_tool_config):
     )
     def test_system_instruction(self, instruction, expected_instr):
         self.responses["generate_content"] = [simple_response("echo echo")]
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction=instruction)
+        model = generative_models.GenerativeModel(
+            "gemini-1.5-flash", system_instruction=instruction
+        )
 
         _ = model.generate_content("test")
 

@@ -852,7 +854,7 @@ def test_count_tokens_smoke(self, kwargs):
         )
 
     def test_repr_for_unary_non_streamed_response(self):
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
         self.responses["generate_content"].append(simple_response("world!"))
         response = model.generate_content("Hello")
 

@@ -885,7 +887,7 @@ def test_repr_for_streaming_start_to_finish(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
         iterator = iter(response)
 

@@ -980,7 +982,7 @@ def test_repr_error_info_for_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)
 
         result = repr(response)

@@ -1096,7 +1098,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self):
 
     def test_repr_for_multi_turn_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["generate_content"] = [

@@ -1119,7 +1121,7 @@ def test_repr_for_multi_turn_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,

@@ -1133,7 +1135,7 @@ def test_repr_for_multi_turn_chat(self):
 
     def test_repr_for_incomplete_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["stream_generate_content"] = [

@@ -1148,7 +1150,7 @@ def test_repr_for_incomplete_streaming_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,

@@ -1162,7 +1164,7 @@ def test_repr_for_incomplete_streaming_chat(self):
 
     def test_repr_for_broken_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["stream_generate_content"] = [

@@ -1193,7 +1195,7 @@ def test_repr_for_broken_streaming_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,

@@ -1206,7 +1208,9 @@ def test_repr_for_broken_streaming_chat(self):
         self.assertEqual(expected, result)
 
     def test_repr_for_system_instruction(self):
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction="Be excellent.")
+        model = generative_models.GenerativeModel(
+            "gemini-1.5-flash", system_instruction="Be excellent."
+        )
         result = repr(model)
         self.assertIn("system_instruction='Be excellent.'", result)
 

@@ -1237,7 +1241,7 @@ def test_chat_with_request_options(self):
         )
         request_options = {"timeout": 120}
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         chat.send_message("hello", request_options=helper_types.RequestOptions(**request_options))
 
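
The repr-focused tests above expect the normalized model name; a tiny sketch of what they assert, assuming the library normalizes bare names to the `models/` prefix as the updated expected strings show:

```python
import google.generativeai as genai

model = genai.GenerativeModel("gemini-1.5-flash")
# repr(model) includes model_name='models/gemini-1.5-flash', matching the
# updated expected strings in the ChatSession repr tests above.
print(repr(model))
```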

tests/test_generative_models_async.py

+3 -3

@@ -80,7 +80,7 @@ async def count_tokens(
 
     async def test_basic(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
 
         self.responses["generate_content"] = [simple_response("world!")]
 

@@ -93,7 +93,7 @@ async def test_basic(self):
 
     async def test_streaming(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
 
         async def responses():
             for c in "world!":

@@ -195,7 +195,7 @@ async def test_tool_config(self, tool_config, expected_tool_config):
         )
         self.responses["generate_content"] = [simple_response("echo echo")]
 
-        model = generative_models.GenerativeModel("gemini-pro", tools=tools)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
         _ = await model.generate_content_async("Hello", tools=[tools], tool_config=tool_config)
 
         req = self.observed_requests[0]