
Commit 802ef4c

tests : add falcon tests (py + cpp, currently do not pass Unicode)
1 parent dbcf470

5 files changed, +187 −3 lines changed

tests/CMakeLists.txt (+4 −2)
@@ -25,8 +25,10 @@ endfunction()
 llama_build_and_test_executable(test-quantize-fns.cpp)
 llama_build_and_test_executable(test-quantize-perf.cpp)
 llama_build_and_test_executable(test-sampling.cpp)
-llama_build_executable(test-tokenizer-0.cpp)
-llama_test_executable (test-tokenizer-0.llama test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
+llama_build_executable(test-tokenizer-0-llama.cpp)
+llama_test_executable (test-tokenizer-0-llama test-tokenizer-0-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
+llama_build_executable(test-tokenizer-0-falcon.cpp)
+#llama_test_executable (test-tokenizer-0-falcon test-tokenizer-0-falcon.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
 llama_build_executable(test-tokenizer-1.cpp)
 # test-tokenizer-1 requires a BPE vocab. re-enable when we have one.
 #llama_test_executable (test-tokenizer-1.llama test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)

tests/test-tokenizer-0-falcon.cpp (+180)
@@ -0,0 +1,180 @@
+#include "llama.h"
+#include "common.h"
+
+#include <cstdio>
+#include <string>
+#include <map>
+#include <vector>
+#include <fstream>
+
+// generate using test-tokenizer-0-falcon.py
+static const std::map<std::string, std::vector<llama_token>> & k_tests() {
+    static std::map<std::string, std::vector<llama_token>> _k_tests = {
+        { ""                      , { }, },
+        { " "                     , { 204, }, },
+        { "  "                    , { 258, }, },
+        { "   "                   , { 466, }, },
+        { "\t"                    , { 192, }, },
+        { "\n"                    , { 193, }, },
+        { "\t\n"                  , { 19125, }, },
+        { "Hello world"           , { 9856, 1079, }, },
+        { " Hello world"          , { 23090, 1079, }, },
+        { "Hello World"           , { 9856, 2889, }, },
+        { " Hello World"          , { 23090, 2889, }, },
+        { " Hello World!"         , { 23090, 2889, 12, }, },
+        { "Hello, world!"         , { 9856, 23, 1079, 12, }, },
+        { " Hello, world!"        , { 23090, 23, 1079, 12, }, },
+        { " this is 🦙.cpp"        , { 414, 304, 3346, 111, 231, 25, 29247, }, },
+        { "w048 7tuijk dsdfhu"    , { 98, 55866, 204, 34, 16682, 7149, 36190, 6869, 11481, }, },
+        { "нещо на Български"     , { 150, 133, 6207, 151, 215, 150, 134, 5052, 133, 6279, 5052, 223, 151, 216, 49679, 123, 53110, 47043, 7795, }, },
+        { "កាន់តែពិសេសអាចខលចេញ"    , { 38154, 206, 38154, 126, 38154, 225, 167, 237, 217, 38154, 221, 167, 237, 208, 38154, 228, 38154, 127, 38154, 237, 167, 237, 207, 38154, 237, 38154, 107, 38154, 126, 38154, 211, 38154, 207, 38154, 233, 38154, 211, 167, 237, 207, 38154, 215, }, },
+        { "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)", { 2571, 232, 206, 204, 19, 11003, 20, 8196, 126, 283, 219, 48778, 116, 13392, 204, 19, 51831, 732, 63209, 1741, 7955, 522, 20, 22438, 211, 204, 19, 7927, 53360, 325, 504, 701, 946, 10930, 20, }, },
+        { "Hello"                 , { 9856, }, },
+        { " Hello"                , { 23090, }, },
+        { "  Hello"               , { 204, 23090, }, },
+        { "   Hello"              , { 258, 23090, }, },
+        { "    Hello"             , { 466, 23090, }, },
+        { "    Hello\n    Hello"  , { 466, 23090, 742, 23090, }, },
+    };
+
+    return _k_tests;
+}
+
+int main(int argc, char **argv) {
+    if (argc < 2) {
+        fprintf(stderr, "Usage: %s vocab-file [text-file]\n", argv[0]);
+        return 1;
+    }
+
+    const std::string fname = argv[1];
+
+    std::string fname_text;
+    if (argc > 2) {
+        fname_text = argv[2];
+    }
+
+    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
+
+    llama_model * model;
+    llama_context * ctx;
+
+    llama_backend_init(false);
+
+    // load the vocab
+    {
+        auto lparams = llama_context_default_params();
+
+        lparams.vocab_only = true;
+
+        model = llama_load_model_from_file(fname.c_str(), lparams);
+
+        if (model == NULL) {
+            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+            return 1;
+        }
+
+        ctx = llama_new_context_with_model(model, lparams);
+
+        if (ctx == NULL) {
+            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+            llama_free_model(model);
+            return 1;
+        }
+    }
+
+    const int n_vocab = llama_n_vocab(ctx);
+
+    if (n_vocab != 65024) {
+        fprintf(stderr, "%s : expected 65024 tokens, got %d\n", __func__, n_vocab);
+        llama_free_model(model);
+        llama_free(ctx);
+        return 2;
+    }
+
+    bool success = true;
+
+    for (const auto & test_kv : k_tests()) {
+        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, false);
+
+        printf("\n");
+        printf("src: '%s'\n", test_kv.first.c_str());
+        printf("res: '%s'\n", llama_detokenize(ctx, res).c_str());
+        printf("tok: ");
+        for (const auto & tok : res) {
+            printf("%d ", tok);
+        }
+        printf("\n");
+
+        bool correct = res.size() == test_kv.second.size();
+
+        for (int i = 0; i < (int) res.size() && correct; ++i) {
+            if (test_kv.second[i] != res[i]) {
+                correct = false;
+            }
+        }
+
+        if (!correct) {
+            fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
+            fprintf(stderr, "%s : detokenized to: '%s' instead of '%s'\n", __func__,
+                llama_detokenize(ctx, res).c_str(),
+                llama_detokenize(ctx, test_kv.second).c_str());
+            fprintf(stderr, "%s : expected tokens: ", __func__);
+            for (const auto & t : test_kv.second) {
+                fprintf(stderr, "%6d, ", t);
+            }
+            fprintf(stderr, "\n");
+            fprintf(stderr, "%s : got tokens: ", __func__);
+            for (const auto & t : res) {
+                fprintf(stderr, "%6d, ", t);
+            }
+            fprintf(stderr, "\n");
+
+            success = false;
+        }
+    }
+
+    if (!fname_text.empty()) {
+        fprintf(stderr, "%s : tokenizing: '%s'\n", __func__, fname_text.c_str());
+
+        std::string text;
+        {
+            std::ifstream ifs(fname_text);
+            if (!ifs) {
+                fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_text.c_str());
+                return 1;
+            }
+            text = std::string(std::istreambuf_iterator<char>(ifs), std::istreambuf_iterator<char>());
+        }
+
+        fprintf(stderr, "%s : text size: %zu\n", __func__, text.size());
+
+        const std::vector<llama_token> res = llama_tokenize(ctx, text, true);
+
+        fprintf(stderr, "%s : tokens: %zu\n", __func__, res.size());
+
+        {
+            const std::string fname_out = fname_text + ".tokcpp";
+
+            std::ofstream ofs(fname_out);
+            if (!ofs) {
+                fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
+                return 1;
+            }
+
+            for (const auto & tok : res) {
+                ofs << tok << " ";
+            }
+
+            ofs << "\n";
+        }
+
+        fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
+    }
+
+    llama_free_model(model);
+    llama_free(ctx);
+
+    llama_backend_free();
+
+    return success ? 0 : 3;
+}
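
The k_tests table above is, per its comment, meant to be generated by the companion script tests/test-tokenizer-0-falcon.py rather than written by hand. That script is one of the five files in this commit but is not expanded in this view. As a rough illustration only, a generator along these lines could emit the C++ initializer rows; the tokenizer id tiiuae/falcon-7b, the string subset, and the output formatting here are assumptions, not the actual script:

# Hypothetical sketch of a k_tests generator; the real
# tests/test-tokenizer-0-falcon.py in this commit may differ.
from transformers import AutoTokenizer

# assumed source of the Falcon BPE vocab
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b")

# subset of the strings that appear in the table above
tests = ["", " ", "Hello world", " Hello world", "Hello, world!"]

for text in tests:
    ids = tokenizer.encode(text)
    # escape control characters so each row is a valid C++ string literal
    esc = text.replace("\t", "\\t").replace("\n", "\\n")
    row = "".join("%d, " % t for t in ids)
    print('        { "%s" , { %s}, },' % (esc, row))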
File renamed without changes.

tests/test-tokenizer-0.cpp renamed to tests/test-tokenizer-0-llama.cpp (+3 −1)
@@ -7,7 +7,7 @@
 #include <vector>
 #include <fstream>
 
-// generate using test-tokenizer-0.py
+// generate using test-tokenizer-0-llama.py
 static const std::map<std::string, std::vector<llama_token>> & k_tests() {
     static std::map<std::string, std::vector<llama_token>> _k_tests = {
         { "" , { }, },
@@ -171,6 +171,8 @@ int main(int argc, char **argv) {
 
             ofs << "\n";
         }
+
+        fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
     }
 
     llama_free_model(model);
File renamed without changes.
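
When both sides are run against the same text file, the C++ test above writes its token ids to <text-file>.tokcpp, and the Python test presumably writes a similar whitespace-separated list (the .tok name used below is an assumption; only .tokcpp comes from the C++ code shown). A small sketch of how the two streams could be compared:

# Hypothetical comparison helper for the two token dumps.
import sys

def read_tokens(path):
    # both files hold whitespace-separated integer token ids
    with open(path) as f:
        return [int(t) for t in f.read().split()]

py_toks  = read_tokens(sys.argv[1])  # e.g. wiki.test.raw.tok    (assumed Python-side name)
cpp_toks = read_tokens(sys.argv[2])  # e.g. wiki.test.raw.tokcpp (written by the test above)

if py_toks == cpp_toks:
    print("tokenizations match (%d tokens)" % len(py_toks))
else:
    n = min(len(py_toks), len(cpp_toks))
    for i in range(n):
        if py_toks[i] != cpp_toks[i]:
            print("first mismatch at index %d: %d vs %d" % (i, py_toks[i], cpp_toks[i]))
            break
    else:
        print("streams agree for %d tokens but lengths differ (%d vs %d)" % (n, len(py_toks), len(cpp_toks)))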
