
Commit 8a58d77

Add count tokens standard samples (#193)
Add the standard samples, except for those which rely on support for files or explicit caching.
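The sample reads its API key from the GEMINI_API_KEY environment variable; since the image sample loads resources/organ.jpg by relative path, run it from samples/dart, e.g. with `dart run bin/count_tokens.dart`.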
1 parent 9d2f9ea

1 file changed: +162 -0


samples/dart/bin/count_tokens.dart

@@ -0,0 +1,162 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import 'dart:io';

import 'package:google_generative_ai/google_generative_ai.dart';

final apiKey = () {
  final apiKey = Platform.environment['GEMINI_API_KEY'];
  if (apiKey == null) {
    stderr.writeln(r'No $GEMINI_API_KEY environment variable');
    exit(1);
  }
  return apiKey;
}();

Future<void> tokensTextOnly() async {
  // [START tokens_text_only]
  final model = GenerativeModel(
    model: 'gemini-1.5-flash',
    apiKey: apiKey,
  );
  final prompt = 'The quick brown fox jumps over the lazy dog.';
  final tokenCount = await model.countTokens([Content.text(prompt)]);
  print('Total tokens: ${tokenCount.totalTokens}');
  // [END tokens_text_only]
}

Future<void> tokensChat() async {
  // [START tokens_chat]
  final model = GenerativeModel(
    model: 'gemini-1.5-flash',
    apiKey: apiKey,
  );
  final chat = model.startChat(history: [
    Content.text('Hi my name is Bob'),
    Content.model([TextPart('Hi Bob!')])
  ]);
  var tokenCount = await model.countTokens(chat.history);
  print('Total tokens: ${tokenCount.totalTokens}');

  final response = await chat.sendMessage(Content.text(
      'In one sentence, explain how a computer works to a young child.'));
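  // `usageMetadata` is nullable, so a null-check pattern is used to read it
  // only when the API reports usage.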
  if (response.usageMetadata case final usage?) {
    print('Prompt: ${usage.promptTokenCount}, '
        'Candidates: ${usage.candidatesTokenCount}, '
        'Total: ${usage.totalTokenCount}');
  }

  tokenCount = await model.countTokens(
      [...chat.history, Content.text('What is the meaning of life?')]);
  print('Total tokens: ${tokenCount.totalTokens}');
  // [END tokens_chat]
}

Future<void> tokensMultimodalImageInline() async {
  // [START tokens_multimodal_image_inline]
  final model = GenerativeModel(
    model: 'gemini-1.5-flash',
    apiKey: apiKey,
  );

  Future<DataPart> fileToPart(String mimeType, String path) async {
    return DataPart(mimeType, await File(path).readAsBytes());
  }

  final prompt = 'Tell me about this image';
  final image = await fileToPart('image/jpeg', 'resources/organ.jpg');
  final content = Content.multi([TextPart(prompt), image]);

  // An image's display size does not affect its token count.
  // Optionally, you can call `countTokens` for the prompt and file separately.
  final tokenCount = await model.countTokens([content]);
  print('Total tokens: ${tokenCount.totalTokens}');
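
  // A sketch of the separate counts mentioned in the comment above; the exact
  // numbers depend on the model's tokenizer.
  final promptTokenCount = await model.countTokens([Content.text(prompt)]);
  final imageTokenCount = await model.countTokens([Content.multi([image])]);
  print('Prompt tokens: ${promptTokenCount.totalTokens}, '
      'image tokens: ${imageTokenCount.totalTokens}');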

  final response = await model.generateContent([content]);
  if (response.usageMetadata case final usage?) {
    print('Prompt: ${usage.promptTokenCount}, '
        'Candidates: ${usage.candidatesTokenCount}, '
        'Total: ${usage.totalTokenCount}');
  }
  // [END tokens_multimodal_image_inline]
}

Future<void> tokensSystemInstructions() async {
  // [START tokens_system_instructions]
  var model = GenerativeModel(
    model: 'gemini-1.5-flash',
    apiKey: apiKey,
  );
  final prompt = 'The quick brown fox jumps over the lazy dog.';

  // The total token count includes everything sent in the `generateContent`
  // request.
  var tokenCount = await model.countTokens([Content.text(prompt)]);
  print('Total tokens: ${tokenCount.totalTokens}');
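
  // System instructions are sent with every request, so configuring one below
  // increases the total token count for the same prompt.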
  model = GenerativeModel(
    model: 'gemini-1.5-flash',
    apiKey: apiKey,
    systemInstruction: Content.system('You are a cat. Your name is Neko.'),
  );
  tokenCount = await model.countTokens([Content.text(prompt)]);
  print('Total tokens: ${tokenCount.totalTokens}');
  // [END tokens_system_instructions]
}

Future<void> tokensTools() async {
  // [START tokens_tools]
  var model = GenerativeModel(
    model: 'gemini-1.5-flash',
    apiKey: apiKey,
  );
  final prompt = 'I have 57 cats, each owns 44 mittens, '
      'how many mittens is that in total?';

  // The total token count includes everything sent in the `generateContent`
  // request.
  var tokenCount = await model.countTokens([Content.text(prompt)]);
  print('Total tokens: ${tokenCount.totalTokens}');
  final binaryFunction = Schema.object(
    properties: {
      'a': Schema.number(nullable: false),
      'b': Schema.number(nullable: false)
    },
    requiredProperties: ['a', 'b'],
  );

  model = GenerativeModel(
    model: 'gemini-1.5-flash',
    apiKey: apiKey,
    tools: [
      Tool(functionDeclarations: [
        FunctionDeclaration('add', 'returns a + b', binaryFunction),
        FunctionDeclaration('subtract', 'returns a - b', binaryFunction),
        FunctionDeclaration('multiply', 'returns a * b', binaryFunction),
        FunctionDeclaration('divide', 'returns a / b', binaryFunction)
      ])
    ],
  );
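  // The function declarations are sent along with the prompt, so they are
  // included in the total token count.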
  tokenCount = await model.countTokens([Content.text(prompt)]);
  print('Total tokens: ${tokenCount.totalTokens}');
  // [END tokens_tools]
}

void main() async {
  await tokensTextOnly();
  await tokensChat();
  await tokensMultimodalImageInline();
  await tokensSystemInstructions();
  await tokensTools();
}

0 commit comments
