Skip to content

Commit daab3d7

Browse files
authored
Add more tokenizer tests (#3742)
* Add more tokenizer tests
* Add starcoder
* Update test vocab files
* Restrict bpe tokenizer tests to unicode planes
* Update comment
* Comment cosmetics
* Remove bloom vocab/test
1 parent 469c9ad commit daab3d7

6 files changed

+16
-3
lines changed

Diff for: models/ggml-vocab-baichuan.gguf

1.28 MB
Binary file not shown.

Diff for: models/ggml-vocab-gpt-neox.gguf

1.69 MB
Binary file not shown.

Diff for: models/ggml-vocab-refact.gguf

1.64 MB
Binary file not shown.

Diff for: models/ggml-vocab-starcoder.gguf

1.64 MB
Binary file not shown.

Diff for: tests/CMakeLists.txt

+4
Original file line number | Diff line number | Diff line change
@@ -28,10 +28,14 @@ llama_build_executable(test-tokenizer-0-falcon.cpp)
2828
llama_test_executable (test-tokenizer-0-falcon test-tokenizer-0-falcon.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
2929
llama_build_executable(test-tokenizer-1-llama.cpp)
3030
llama_test_executable (test-tokenizer-1-llama test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
31+
llama_test_executable(test-tokenizer-1-baichuan test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
3132
llama_build_executable(test-tokenizer-1-bpe.cpp)
3233
llama_test_executable (test-tokenizer-1-falcon test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
3334
llama_test_executable(test-tokenizer-1-aquila test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
3435
llama_test_executable(test-tokenizer-1-mpt test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
36+
llama_test_executable(test-tokenizer-1-gpt-neox test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf)
37+
llama_test_executable(test-tokenizer-1-refact test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
38+
llama_test_executable(test-tokenizer-1-starcoder test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
3539
llama_build_and_test_executable(test-grammar-parser.cpp)
3640
llama_build_and_test_executable(test-llama-grammar.cpp)
3741
llama_build_and_test_executable(test-grad0.cpp) # SLOW

Diff for: tests/test-tokenizer-1-bpe.cpp

+12-3
Original file line number | Diff line number | Diff line change
@@ -91,9 +91,19 @@ int main(int argc, char **argv) {
9191
}
9292
}
9393
}
94-
// TODO: why doesn't this work for the full range of Unicodes?
94+
// Restrict to assigned unicode planes
9595
// for (uint32_t cp = 0x10000; cp < 0x0010ffff; ++cp) {
96-
for (uint32_t cp = 0x10000; cp < 0x00080000; ++cp) {
96+
for (uint32_t cp = 0x10000; cp < 0x00040000; ++cp) {
97+
std::string str = codepoint_to_utf8(cp);
98+
std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
99+
std::string check = llama_detokenize_bpe(ctx, tokens);
100+
if (str != check) {
101+
fprintf(stderr, "%s : error: codepoint %x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
102+
__func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
103+
return 4;
104+
}
105+
}
106+
for (uint32_t cp = 0x000e0000; cp < 0x0010ffff; ++cp) {
97107
std::string str = codepoint_to_utf8(cp);
98108
std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
99109
std::string check = llama_detokenize_bpe(ctx, tokens);
@@ -103,7 +113,6 @@ int main(int argc, char **argv) {
103113
return 4;
104114
}
105115
}
106-
107116
llama_free_model(model);
108117
llama_free(ctx);
109118

0 commit comments

Comments (0)