Skip to content

Commit 470e8b5

Browse files
pudepiedj and jordankanter
authored and committed
main : print total token count and tokens consumed so far (ggml-org#4874)
* Token count changes
* Add show token count
* Updating before PR
* Two requested changes
* Move param def posn
1 parent 4e8ee2f commit 470e8b5

File tree

4 files changed

+15
-3
lines changed

4 files changed

+15
-3
lines changed

common/common.cpp

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -630,6 +630,12 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
630630
break;
631631
}
632632
params.ppl_stride = std::stoi(argv[i]);
633+
} else if (arg == "-stc" || arg == "--show_token_count") {
634+
if (++i >= argc) {
635+
invalid_param = true;
636+
break;
637+
}
638+
params.token_interval = std::stoi(argv[i]);
633639
} else if (arg == "--ppl-output-type") {
634640
if (++i >= argc) {
635641
invalid_param = true;
@@ -944,6 +950,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
944950
printf(" --override-kv KEY=TYPE:VALUE\n");
945951
printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
946952
printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
953+
printf(" -stc N --show_token_count N\n");
954+
printf(" show consumed tokens every N tokens\n");
947955
printf("\n");
948956
#ifndef LOG_DISABLE_LOGS
949957
log_print_usage();

common/common.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,7 @@ struct gpt_params {
6464
int32_t n_beams = 0; // if non-zero then use beam search of given width.
6565
int32_t grp_attn_n = 1; // group-attention factor
6666
int32_t grp_attn_w = 512; // group-attention width
67+
int32_t token_interval = 512; // show token count every 512 tokens
6768
float rope_freq_base = 0.0f; // RoPE base frequency
6869
float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
6970
float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor
@@ -242,4 +243,3 @@ void dump_kv_cache_view(const llama_kv_cache_view & view, int row_size = 80);
242243

243244
// Dump the KV cache view showing individual sequences in each cell (long output).
244245
void dump_kv_cache_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
245-

examples/main/main.cpp

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -500,7 +500,7 @@ int main(int argc, char ** argv) {
500500
while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
501501
// predict
502502
if (!embd.empty()) {
503-
// Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
503+
// Note: (n_ctx - 4) here is to match the logic for commandline prompt handling via
504504
// --prompt or --file which uses the same value.
505505
int max_embd_size = n_ctx - 4;
506506

@@ -650,6 +650,10 @@ int main(int argc, char ** argv) {
650650
n_past += n_eval;
651651

652652
LOG("n_past = %d\n", n_past);
653+
// Display total tokens alongside total time
654+
if (n_past % params.token_interval == 0) {
655+
printf("\n\033[31mTokens consumed so far = %d / %d \033[0m\n", n_past, n_ctx);
656+
}
653657
}
654658

655659
if (!embd.empty() && !path_session.empty()) {

llama.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10921,7 +10921,7 @@ void llama_print_timings(struct llama_context * ctx) {
1092110921
__func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
1092210922
LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
1092310923
__func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
10924-
LLAMA_LOG_INFO("%s: total time = %10.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
10924+
LLAMA_LOG_INFO("%s: total time = %10.2f ms / %5d tokens\n", __func__, (timings.t_end_ms - timings.t_start_ms), (timings.n_p_eval + timings.n_eval));
1092510925
}
1092610926

1092710927
void llama_reset_timings(struct llama_context * ctx) {

0 commit comments

Comments (0)