Skip to content

Commit f5e5888

Browse files
committed
# This is a combination of 16 commits.
# This is the 1st commit message: refactor: updating codeowners # This is the commit message #2: add chat functions # This is the commit message #3: use correct testing project # This is the commit message #4: refactor: adding system tests + updating corresponding chat samples # This is the commit message #5: add countTokens sample # This is the commit message #6: refactor: adding in region tags, abstracting out mimetype, adding new image ur # This is the commit message #7: refactor: updating gs url in test, fix to args getting passed to sample functions # This is the commit message #8: refactor: resolving file paths in tests, adding wait helper function # This is the commit message #9: add warning about safety concerns # This is the commit message #10: refactor:filling out nonstreamingchat and streamcontent tests # This is the commit message #11: add countTokens test # This is the commit message #12: refactor: filling out more streaming tests # This is the commit message #13: add safety settings test # This is the commit message #14: refactor: adding in stream content and multipart content tests # This is the commit message #15: feat: add new sendMultiModalPromptWithImage sample # This is the commit message #16: refactor: adding region tags
1 parent dc9370f commit f5e5888

21 files changed

+1052
-4
lines changed

CODEOWNERS

+1
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ monitoring/opencensus @GoogleCloudPlatform/nodejs-samples-reviewers
5050

5151
# Data & AI
5252
ai-platform @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
53+
generative-ai @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
5354
automl @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
5455
cloud-language @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers
5556
contact-center-insights @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/nodejs-samples-reviewers

generative-ai/snippets/countTokens.js

+52
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
// Copyright 2023 Google LLC
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// https://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
const { VertexAI } = require('@google-cloud/vertexai');
16+
17+
/**
 * Counts the tokens in a fixed sample prompt with the given Gemini model
 * and logs the raw countTokens response.
 *
 * @param {string} projectId Google Cloud project id.
 * @param {string} location Vertex AI region, e.g. us-central1.
 * @param {string} model Model resource name, e.g. gemini-pro.
 */
async function countTokens(
  projectId = 'PROJECT_ID',
  location = 'LOCATION_ID',
  model = 'MODEL'
) {
  // [START aiplatform_gemini_token_count]

  /**
   * TODO(developer): Uncomment these variables before running the sample.
   */
  // const projectId = 'your-project-id';
  // const location = 'us-central1';
  // const model = 'gemini-pro';

  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location});

  // Instantiate the model to query
  const generativeModel = vertexAI.preview.getGenerativeModel({model});

  // Build a countTokens request around a single user turn
  const request = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };

  // Issue the request and print the raw response
  const response = await generativeModel.countTokens(request);
  console.log('count tokens response: ', response);

  // [END aiplatform_gemini_token_count]
}
48+
49+
// CLI entry point: forward command-line args to the sample and report
// failures through the process exit code rather than an uncaught rejection.
const cliArgs = process.argv.slice(2);
countTokens(...cliArgs).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});

generative-ai/snippets/index.js

+124
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
const {
  VertexAI,
  HarmBlockThreshold,
  HarmCategory,
} = require('@google-cloud/vertexai');

// NOTE(review): project id is hard-coded to an internal test project —
// confirm this should not come from an env var before publishing.
const project = 'cloud-llm-preview1';
const location = 'us-central1';

// Initialize Vertex with your Cloud project and location
const vertex_ai = new VertexAI({project: project, location: location});

// Instantiate the models
// Text model, with optional safety settings and generation config applied
// at model level (they can also be set per-request).
const generativeModel = vertex_ai.preview.getGenerativeModel({
  model: 'gemini-pro',
  // The following parameters are optional
  // They can also be passed to individual content generation requests
  safety_settings: [
    {
      category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
      threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
    },
  ],
  generation_config: {max_output_tokens: 256},
});

// Vision-capable model used by the multipart (image + text) sample below.
const generativeVisionModel = vertex_ai.preview.getGenerativeModel({
  model: 'gemini-pro-vision',
});
30+
31+
// Streams a text-only prompt: logs each chunk as it arrives, then the
// SDK-aggregated full response.
async function streamContentTextOnly() {
  const request = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };

  const result = await generativeModel.generateContentStream(request);

  for await (const chunk of result.stream) {
    console.log('stream chunk:', chunk);
  }
  console.log('aggregated response: ', await result.response);
}
44+
45+
// Sends a text-only prompt with the unary (non-streaming) API and logs
// the aggregated response.
async function nonStreamingTextOnly() {
  const request = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };
  const result = await generativeModel.generateContent(request);
  console.log('non-streaming response: ', await result.response);
}
53+
54+
// Counts the tokens of a fixed sample prompt and logs the raw response.
async function countTokens() {
  const request = {
    contents: [{role: 'user', parts: [{text: 'How are you doing today?'}]}],
  };
  const response = await generativeModel.countTokens(request);
  console.log('count tokens response: ', response);
}
62+
63+
// Drives a three-turn chat session with the unary sendMessage API,
// logging each raw result and its response. Log labels match the
// original sample output exactly.
async function nonStreamingChat() {
  const chat = generativeModel.startChat({});

  const turns = [
    {
      message: 'hello',
      resultLabel: 'send message result1: ',
      responseLabel: 'send message response1: ',
    },
    {
      message: 'what day is it today?',
      resultLabel: 'result2: ',
      responseLabel: 'send message response2: ',
    },
    {
      message: 'what day is it tomorrow?',
      resultLabel: 'result3: ',
      responseLabel: 'send message response3: ',
    },
  ];

  // The chat object threads conversation history through each turn.
  for (const {message, resultLabel, responseLabel} of turns) {
    const result = await chat.sendMessage(message);
    console.log(resultLabel, result);
    console.log(responseLabel, result.response);
  }
}
78+
79+
// Drives a three-turn chat session with the streaming sendMessageStream
// API, logging each stream handle and the awaited aggregated response.
async function streamingChat() {
  const chat = generativeModel.startChat({});

  const turns = [
    {
      message: 'hello again',
      resultLabel: 'stream result1: ',
      responseLabel: 'stream send message response1: ',
    },
    {
      message: 'what is the date today?',
      resultLabel: 'stream result2: ',
      responseLabel: 'stream send message response2: ',
    },
    {
      message: 'what is the date tomorrow?',
      resultLabel: 'stream result3: ',
      responseLabel: 'stream send message response3: ',
    },
  ];

  for (const {message, resultLabel, responseLabel} of turns) {
    const streamResult = await chat.sendMessageStream(message);
    console.log(resultLabel, streamResult);
    // Unlike the unary API, .response here is a promise to the aggregate.
    console.log(responseLabel, await streamResult.response);
  }
}
96+
97+
// Sends a mixed image + text prompt to the vision model and logs the
// content of the first candidate in the aggregated response.
async function multiPartContent() {
  // Reference an image in Cloud Storage rather than inlining bytes.
  const filePart = {
    file_data: {
      file_uri: 'gs://sararob_imagegeneration_test/kitten.jpeg',
      mime_type: 'image/jpeg',
    },
  };
  const textPart = {text: 'What is this a picture of?'};

  const request = {
    contents: [{role: 'user', parts: [textPart, filePart]}],
  };

  // Use the module-level vision model. The original re-created an
  // identical model instance here, shadowing the shared one for no benefit.
  const resp = await generativeVisionModel.generateContentStream(request);
  const contentResponse = await resp.response;
  console.log(contentResponse.candidates[0].content);
}
118+
119+
// Run every sample. The calls are independent, so they still run
// concurrently as before — but the original left six floating promises
// with no rejection handling, which turns any API error into an
// unhandled promise rejection. Collect them and surface failures instead.
Promise.allSettled([
  nonStreamingTextOnly(),
  streamContentTextOnly(),
  countTokens(),
  nonStreamingChat(),
  streamingChat(),
  multiPartContent(),
]).then(results => {
  for (const result of results) {
    if (result.status === 'rejected') {
      console.error(result.reason);
      process.exitCode = 1;
    }
  }
});
+73
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
// Copyright 2023 Google LLC
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// https://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
const {VertexAI} = require('@google-cloud/vertexai');
16+
17+
// Returns a promise that resolves after `time` milliseconds; used to pace
// API calls (see the commented-out call in the sample below).
function wait(time) {
  return new Promise(resolve => setTimeout(resolve, time));
}
22+
23+
/**
 * Runs a three-turn multiturn chat against the given Gemini model using
 * the unary sendMessage API, logging each user prompt and model reply.
 *
 * @param {string} projectId Google Cloud project id.
 * @param {string} location Vertex AI region, e.g. us-central1.
 * @param {string} model Model resource name.
 */
async function createNonStreamingChat(
  projectId = 'PROJECT_ID',
  location = 'LOCATION_ID',
  model = 'MODEL'
) {
  // TODO: Find better method. Setting delay to give api time to respond, otherwise it will 404
  // await wait(10);

  // [START aiplatform_gemini_multiturn_chat]
  /**
   * TODO(developer): Uncomment these variables before running the sample.
   */
  // const projectId = 'your-project-id';
  // const location = 'us-central1';

  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location});

  // Instantiate the model
  const generativeModel = vertexAI.preview.getGenerativeModel({model});

  const chat = generativeModel.startChat({});

  // Send each prompt in turn; the chat object carries the history forward.
  const prompts = [
    'Hello',
    'Can you tell me a scientific fun fact?',
    'How can I learn more about that?',
  ];

  for (const prompt of prompts) {
    console.log(`User: ${prompt}`);
    const result = await chat.sendMessage(prompt);
    const reply = result.response.candidates[0].content.parts[0].text;
    console.log('Chat bot: ', reply);
  }

  // [END aiplatform_gemini_multiturn_chat]
}

// CLI entry point: forward argv to the sample and report failure via the
// process exit code.
createNonStreamingChat(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
// Copyright 2023 Google LLC
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// https://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
const {VertexAI} = require('@google-cloud/vertexai');
16+
17+
/**
 * Generates a reply to a fixed text prompt, consuming the streaming API
 * only through its aggregated response, and logs the full text.
 *
 * @param {string} projectId Google Cloud project id.
 * @param {string} location Vertex AI region, e.g. us-central1.
 * @param {string} model Model resource name.
 */
async function createNonStreamingContent(
  projectId = 'PROJECT_ID',
  location = 'LOCATION_ID',
  model = 'MODEL'
) {
  // [START aiplatform_gemini_function_calling]
  // NOTE(review): the region tag above reads "function_calling", but this
  // sample demonstrates aggregated content generation — confirm the tag.

  /**
   * TODO(developer): Uncomment these variables before running the sample.
   */
  // const projectId = 'your-project-id';
  // const location = 'us-central1';

  // Initialize Vertex with your Cloud project and location
  const vertexAI = new VertexAI({project: projectId, location});

  // Instantiate the model
  const generativeModel = vertexAI.preview.getGenerativeModel({model});

  const request = {
    contents: [{role: 'user', parts: [{text: 'What is Node.js?'}]}],
  };

  console.log('Prompt:');
  console.log(request.contents[0].parts[0].text);
  console.log('Non-Streaming Response Text:');

  // Kick off a streamed generation, but wait only for the aggregate
  // rather than consuming individual chunks.
  const streamingResult = await generativeModel.generateContentStream(request);
  const aggregated = await streamingResult.response;

  // Pull the text out of the first candidate and print it.
  const fullText = aggregated.candidates[0].content.parts[0].text;
  console.log(fullText);

  // [END aiplatform_gemini_function_calling]
}

// CLI entry point: forward argv to the sample and report failure via the
// process exit code.
createNonStreamingContent(...process.argv.slice(2)).catch(err => {
  console.error(err.message);
  process.exitCode = 1;
});

0 commit comments

Comments
 (0)