@@ -115,7 +115,7 @@ def setUp(self):
 
     def test_hello(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
 
         self.responses["generate_content"].append(simple_response("world!"))
 
@@ -138,7 +138,7 @@ def test_hello(self):
     )
     def test_image(self, content):
         # Generate text from image
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
 
         cat = "It's a cat"
         self.responses["generate_content"].append(simple_response(cat))
@@ -172,7 +172,7 @@ def test_image(self, content):
     )
     def test_generation_config_overwrite(self, config1, config2):
         # Generation config
-        model = generative_models.GenerativeModel("gemini-pro", generation_config=config1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", generation_config=config1)
 
         self.responses["generate_content"] = [
             simple_response(" world!"),
@@ -218,7 +218,7 @@ def test_generation_config_overwrite(self, config1, config2):
     )
     def test_safety_overwrite(self, safe1, safe2):
         # Safety
-        model = generative_models.GenerativeModel("gemini-pro", safety_settings=safe1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", safety_settings=safe1)
 
         self.responses["generate_content"] = [
             simple_response(" world!"),
@@ -253,7 +253,7 @@ def test_stream_basic(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -267,7 +267,7 @@ def test_stream_lookahead(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -287,7 +287,7 @@ def test_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)
 
         self.assertEqual(
@@ -322,7 +322,7 @@ def test_stream_prompt_feedback_not_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(
@@ -389,7 +389,7 @@ def add(a: int, b: int) -> int:
 
     def test_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["generate_content"] = [
@@ -423,7 +423,7 @@ def test_chat(self):
     def test_chat_roles(self):
         self.responses["generate_content"] = [simple_response("hello!")]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         response = chat.send_message("hello?")
         history = chat.history
@@ -792,7 +792,7 @@ def test_tool_config(self, tool_config, expected_tool_config):
         )
         self.responses["generate_content"] = [simple_response("echo echo")]
 
-        model = generative_models.GenerativeModel("gemini-pro", tools=tools)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
         _ = model.generate_content("Hello", tools=[tools], tool_config=tool_config)
 
         req = self.observed_requests[0]
@@ -811,7 +811,9 @@ def test_tool_config(self, tool_config, expected_tool_config):
     )
     def test_system_instruction(self, instruction, expected_instr):
         self.responses["generate_content"] = [simple_response("echo echo")]
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction=instruction)
+        model = generative_models.GenerativeModel(
+            "gemini-1.5-flash", system_instruction=instruction
+        )
 
         _ = model.generate_content("test")
 
@@ -852,7 +854,7 @@ def test_count_tokens_smoke(self, kwargs):
         )
 
     def test_repr_for_unary_non_streamed_response(self):
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
         self.responses["generate_content"].append(simple_response("world!"))
         response = model.generate_content("Hello")
 
@@ -885,7 +887,7 @@ def test_repr_for_streaming_start_to_finish(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
         iterator = iter(response)
 
@@ -980,7 +982,7 @@ def test_repr_error_info_for_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)
 
         result = repr(response)
@@ -1096,7 +1098,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self):
 
     def test_repr_for_multi_turn_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["generate_content"] = [
@@ -1119,7 +1121,7 @@ def test_repr_for_multi_turn_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1133,7 +1135,7 @@ def test_repr_for_multi_turn_chat(self):
 
     def test_repr_for_incomplete_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["stream_generate_content"] = [
@@ -1148,7 +1150,7 @@ def test_repr_for_incomplete_streaming_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1162,7 +1164,7 @@ def test_repr_for_incomplete_streaming_chat(self):
 
     def test_repr_for_broken_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["stream_generate_content"] = [
@@ -1193,7 +1195,7 @@ def test_repr_for_broken_streaming_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1206,7 +1208,9 @@ def test_repr_for_broken_streaming_chat(self):
         self.assertEqual(expected, result)
 
     def test_repr_for_system_instruction(self):
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction="Be excellent.")
+        model = generative_models.GenerativeModel(
+            "gemini-1.5-flash", system_instruction="Be excellent."
+        )
         result = repr(model)
         self.assertIn("system_instruction='Be excellent.'", result)
 
@@ -1237,7 +1241,7 @@ def test_chat_with_request_options(self):
         )
         request_options = {"timeout": 120}
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         chat.send_message("hello", request_options=helper_types.RequestOptions(**request_options))
 
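For reference, outside the test harness the pattern these tests exercise looks like the minimal sketch below (assuming the google-generativeai package is installed and an API key is available; the tests themselves stub the transport layer with canned responses rather than calling the service, and the placeholder key here is hypothetical):

import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder; supply a real key

# Bare model names are normalized to full resource names, e.g.
# "gemini-1.5-flash" -> "models/gemini-1.5-flash", as the repr
# assertions in the hunks above confirm.
model = genai.GenerativeModel(
    "gemini-1.5-flash",
    system_instruction="Be excellent.",  # optional; mirrors test_repr_for_system_instruction
)
response = model.generate_content("Hello")
print(response.text)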