From eed87ffe7fcb79137bd1d12518d30565d0368424 Mon Sep 17 00:00:00 2001
From: Shilpa Kancharla
Date: Sat, 29 Jun 2024 13:46:58 -0400
Subject: [PATCH 1/3] Add cache_from_name and cache_chat

---
 samples/cache.py | 35 +++++++++++++++++++++++++++++++++++
 samples/chat.py  |  9 ++++++---
 2 files changed, 41 insertions(+), 3 deletions(-)

diff --git a/samples/cache.py b/samples/cache.py
index 16beda976..c77b63e58 100644
--- a/samples/cache.py
+++ b/samples/cache.py
@@ -96,6 +96,41 @@ def test_cache_update(self):
         # [END cache_update]
         cache.delete()
 
+    def test_cache_create_from_name(self):
+        # [START cache_create_from_name]
+        document = genai.upload_file(path=media / "a11.txt")
+        model_name = "gemini-1.5-flash-001"
+        cache = genai.caching.CachedContent.create(
+            model=model_name,
+            system_instruction="You are an expert analyzing transcripts.",
+            contents=[document],
+        )
+        apollo_model = genai.caching.CachedContent.from_cached_content(cache)
+        response = apollo_model.generate_content("Find a lighthearted moment from this transcript")
+        print(response.text)
+        # [END cache_create_from_name]
+        cache.delete()
+
+    def test_cache_chat(self):
+        # [START cache_chat]
+        document = genai.upload_file(path=media / "a11.txt")
+        model_name = "gemini-1.5-flash-001"
+        cache = genai.caching.CachedContent.create(
+            model=model_name,
+            system_instruction="You are an expert analyzing transcripts.",
+            contents=[document],
+        )
+        apollo_model = genai.GenerativeModel.from_cached_content(cached_content=cache)
+        chat = apollo_model.start_chat()
+        response = chat.send_message(
+            "Give me a quote from the most important part of the transcript."
+        )
+        print(response.text)
+        response = chat.send_message("What was recounted after that?")
+        print(response.text)
+        # [END cache_chat]
+        cache.delete()
+
 
 if __name__ == "__main__":
     absltest.main()
diff --git a/samples/chat.py b/samples/chat.py
index 5958979cc..5089450d9 100644
--- a/samples/chat.py
+++ b/samples/chat.py
@@ -31,7 +31,7 @@ def test_chat(self):
             ]
         )
         response = chat.send_message("I have 2 dogs in my house.")
-        print(response.text) 
+        print(response.text)
         response = chat.send_message("How many paws are in my house?")
         print(response.text)
         # [END chat]
@@ -62,11 +62,14 @@ def test_chat_streaming_with_images(self):
         model = genai.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
-        response = chat.send_message("Hello, I'm interested in learning about musical instruments. Can I show you one?", stream=True)
+        response = chat.send_message(
+            "Hello, I'm interested in learning about musical instruments. Can I show you one?",
+            stream=True,
+        )
         for chunk in response:
             print(chunk.text)  # Yes.
         print("_" * 80)
-        
+
         organ = genai.upload_file(media / "organ.jpg")
         response = chat.send_message(
             ["What family of instruments does this instrument belong to?", organ], stream=True

From 8fd39b9c9f7fe959243db676f72bac015eced724 Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Tue, 2 Jul 2024 14:56:30 -0700
Subject: [PATCH 2/3] Resolve comments.

Change-Id: I74aa097499fc426e4e39327c2ffcdcf3f68534dd
---
 samples/cache.py | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)

diff --git a/samples/cache.py b/samples/cache.py
index c77b63e58..f02189912 100644
--- a/samples/cache.py
+++ b/samples/cache.py
@@ -32,6 +32,10 @@ def test_cache_create(self):
             contents=[document],
         )
         print(cache)
+
+        model = genai.GenerativeModel.from_cached_content(cache)
+        response = model.generate_content("Please summarize this transcript")
+        print(response.text)
         # [END cache_create]
         cache.delete()
 
@@ -105,6 +109,10 @@ def test_cache_create_from_name(self):
             system_instruction="You are an expert analyzing transcripts.",
             contents=[document],
         )
+        cache_name = cache.name  # Save the name for later
+
+        # Later
+        cache = genai.caching.CachedContent.get(cache_name)
         apollo_model = genai.caching.CachedContent.from_cached_content(cache)
         response = apollo_model.generate_content("Find a lighthearted moment from this transcript")
         print(response.text)
@@ -113,21 +121,31 @@ def test_cache_create_from_name(self):
 
     def test_cache_chat(self):
         # [START cache_chat]
+        model_name='gemini-1.5-flash'
+        system_instruction = "You are an expert analyzing transcripts."
+
+        model = genai.GenerativeModel(model_name=model_name, system_instruction=system_instruction)
+        chat = model.start_chat()
         document = genai.upload_file(path=media / "a11.txt")
-        model_name = "gemini-1.5-flash-001"
+        response = chat.send_message(["Hi, could you summarize this transcript?", document])
+        print('\n\nmodel: ', response.text)
+        response = chat.send_message(['Okay, could you tell me more about the trans-lunar injection'])
+        print('\n\nmodel: ', response.text)
+
+        # To cache the conversation so far, pass the chat history as the list of "contents".
         cache = genai.caching.CachedContent.create(
             model=model_name,
-            system_instruction="You are an expert analyzing transcripts.",
-            contents=[document],
+            system_instruction=system_instruction,
+            contents=chat.history,
         )
-        apollo_model = genai.GenerativeModel.from_cached_content(cached_content=cache)
-        chat = apollo_model.start_chat()
+        model = genai.GenerativeModel.from_cached_content(cached_content=cache)
+
+        # Continue the chat where you left off.
+        chat = model.start_chat()
         response = chat.send_message(
-            "Give me a quote from the most important part of the transcript."
+            "I didn't understand that last part, could you explain it in simpler language?"
         )
-        print(response.text)
-        response = chat.send_message("What was recounted after that?")
-        print(response.text)
+        print('\n\nmodel: ', response.text)
         # [END cache_chat]
         cache.delete()

From 8fda4269f28271ce36a434dd8895c94a1a032e1e Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Tue, 2 Jul 2024 15:48:39 -0700
Subject: [PATCH 3/3] update cache sample

Change-Id: I1b261cdcca6e564471bb8ca5a59e9138d4a5f253
---
 samples/cache.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/samples/cache.py b/samples/cache.py
index f02189912..b7edc5de3 100644
--- a/samples/cache.py
+++ b/samples/cache.py
@@ -96,7 +96,7 @@ def test_cache_update(self):
         print(f"After update:\n {cache}")
 
         # Or you can update the expire_time
-        cache.update(expire_time=datetime.now() + datetime.timedelta(minutes=15))
+        cache.update(expire_time=datetime.datetime.now() + datetime.timedelta(minutes=15))
         # [END cache_update]
         cache.delete()
 
@@ -113,7 +113,7 @@ def test_cache_create_from_name(self):
 
         # Later
         cache = genai.caching.CachedContent.get(cache_name)
-        apollo_model = genai.caching.CachedContent.from_cached_content(cache)
+        apollo_model = genai.GenerativeModel.from_cached_content(cache)
         response = apollo_model.generate_content("Find a lighthearted moment from this transcript")
         print(response.text)
         # [END cache_create_from_name]
@@ -121,16 +121,18 @@ def test_cache_create_from_name(self):
 
     def test_cache_chat(self):
         # [START cache_chat]
-        model_name='gemini-1.5-flash'
+        model_name = "gemini-1.5-flash-001"
         system_instruction = "You are an expert analyzing transcripts."
 
         model = genai.GenerativeModel(model_name=model_name, system_instruction=system_instruction)
         chat = model.start_chat()
         document = genai.upload_file(path=media / "a11.txt")
         response = chat.send_message(["Hi, could you summarize this transcript?", document])
-        print('\n\nmodel: ', response.text)
-        response = chat.send_message(['Okay, could you tell me more about the trans-lunar injection'])
-        print('\n\nmodel: ', response.text)
+        print("\n\nmodel: ", response.text)
+        response = chat.send_message(
+            ["Okay, could you tell me more about the trans-lunar injection"]
+        )
+        print("\n\nmodel: ", response.text)
 
         # To cache the conversation so far, pass the chat history as the list of "contents".
         cache = genai.caching.CachedContent.create(
@@ -145,7 +147,7 @@ def test_cache_chat(self):
         response = chat.send_message(
             "I didn't understand that last part, could you explain it in simpler language?"
         )
-        print('\n\nmodel: ', response.text)
+        print("\n\nmodel: ", response.text)
         # [END cache_chat]
         cache.delete()
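
Note: each sample above deletes its cache inline, so an interrupted test run can leave caches alive until their TTL expires. A minimal cleanup sketch, assuming the SDK's genai.caching.CachedContent.list() classmethod (which this patch series does not touch):

import google.generativeai as genai

# Sketch only: iterate over every cached-content entry on the account and
# delete it. Assumes CachedContent.list() yields CachedContent objects with
# the same .name and .delete() used in the samples above.
for cache in genai.caching.CachedContent.list():
    print(f"Deleting {cache.name}")
    cache.delete()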