Commit 71ca2fa

whisper : tokenizer fix + re-enable tokenizer test for LLaMa (#3096)

* Fix for #2721
* Re-enable tokenizer test for LLaMa
* Add `console.cpp` dependency
* Fix dependency on `common`
* Fix the earlier, incorrect fix
* Make console usage platform specific
* Work on compiler warnings
* Adapt the Makefile
* Remove trailing whitespace
* Adapt the other parts of the Makefile
* Fix typo

1 parent 1b6c650 · commit 71ca2fa

6 files changed: +142, -118 lines

Makefile (+3, -3)

@@ -2,7 +2,7 @@
 BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam-search speculative tests/test-c.o

 # Binaries only useful for tests
-TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama tests/test-tokenizer-0-falcon tests/test-tokenizer-1
+TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama tests/test-tokenizer-0-falcon tests/test-tokenizer-1-llama

 # Code coverage output files
 COV_TARGETS = *.gcno tests/*.gcno *.gcda tests/*.gcda *.gcov tests/*.gcov lcov-report gcovr-report

@@ -49,7 +49,7 @@ test: $(TEST_TARGETS)
 			./$$test_target $(CURDIR)/models/ggml-vocab-llama.gguf; \
 		elif [ "$$test_target" = "tests/test-tokenizer-0-falcon" ]; then \
 			continue; \
-		elif [ "$$test_target" = "tests/test-tokenizer-1" ]; then \
+		elif [ "$$test_target" = "tests/test-tokenizer-1-llama" ]; then \
 			continue; \
 		else \
 			echo "Running test $$test_target..."; \

@@ -605,7 +605,7 @@ tests/test-tokenizer-0-falcon: tests/test-tokenizer-0-falcon.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 tests/test-tokenizer-0-llama: tests/test-tokenizer-0-llama.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

-tests/test-tokenizer-1: tests/test-tokenizer-1.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+tests/test-tokenizer-1-llama: tests/test-tokenizer-1-llama.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

 tests/test-c.o: tests/test-c.c llama.h

llama.cpp (+3, -4)

@@ -3121,10 +3121,9 @@ struct llm_tokenizer_spm {
         while (offs < text.size()) {
             llm_symbol sym;
             size_t len = utf8_len(text[offs]);
-            GGML_ASSERT(offs + len <= text.size());
             sym.text = text.c_str() + offs;
-            sym.n = len;
-            offs += len;
+            sym.n = std::min(len, text.size() - offs);
+            offs += sym.n;
             sym.prev = index - 1;
             sym.next = offs == text.size() ? -1 : index + 1;
             index++;

@@ -6218,7 +6217,7 @@ int llama_tokenize_with_model(
     auto res = llama_tokenize_internal(model->vocab, text, add_bos);

     if (n_max_tokens < (int) res.size()) {
-        LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
+        // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
        return -((int) res.size());
     }
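The first hunk is the actual fix for #2721: utf8_len only inspects the lead byte, so a truncated multi-byte sequence at the end of the input can declare a length that runs past the buffer, and the old GGML_ASSERT would abort on such input. Clamping sym.n to the bytes actually remaining keeps the symbol inside the string. A minimal standalone sketch of the behavior (not from the commit; utf8_len mirrors the lookup-table helper used in llama.cpp):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Lead-byte lookup as in llama.cpp: the high nibble of the first byte
    // determines the declared length of a UTF-8 sequence.
    static size_t utf8_len(char src) {
        const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
        uint8_t highbits = static_cast<uint8_t>(src) >> 4;
        return lookup[highbits];
    }

    int main() {
        // "€" is 0xE2 0x82 0xAC; drop the last byte to simulate a
        // truncated multi-byte sequence at the end of the input.
        std::string text = "\xe2\x82";

        size_t offs = 0;
        size_t len  = utf8_len(text[offs]); // declares 3, but only 2 bytes remain

        // old code: GGML_ASSERT(offs + len <= text.size()); -> would abort here
        // new code: clamp the symbol length to the bytes actually available
        size_t n = std::min(len, text.size() - offs);

        printf("declared len = %zu, clamped len = %zu\n", len, n); // 3 vs 2
        return 0;
    }

The second hunk silences the "too many tokens" log; the negative return value already signals the required buffer size to the caller, which probes with a small buffer by design.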

tests/CMakeLists.txt (+2, -3)

@@ -29,9 +29,8 @@ llama_build_executable(test-tokenizer-0-llama.cpp)
 llama_test_executable (test-tokenizer-0-llama test-tokenizer-0-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
 llama_build_executable(test-tokenizer-0-falcon.cpp)
 #llama_test_executable (test-tokenizer-0-falcon test-tokenizer-0-falcon.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
-llama_build_executable(test-tokenizer-1.cpp)
-# test-tokenizer-1 requires a BPE vocab. re-enable when we have one.
-#llama_test_executable (test-tokenizer-1.llama test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
+llama_build_executable(test-tokenizer-1-llama.cpp)
+llama_test_executable (test-tokenizer-1-llama test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
 #llama_test_executable(test-tokenizer-1.aquila test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
 llama_build_and_test_executable(test-grammar-parser.cpp)
 llama_build_and_test_executable(test-llama-grammar.cpp)

tests/test-tokenizer-0-llama.cpp (+7)

@@ -1,5 +1,6 @@
 #include "llama.h"
 #include "common.h"
+#include "console.h"

 #include <cstdio>
 #include <string>

@@ -89,6 +90,12 @@ int main(int argc, char **argv) {
         return 2;
     }

+#ifdef _WIN32
+    // We need this for unicode console support
+    console::init(false, false);
+    atexit([]() { console::cleanup(); });
+#endif
+
     bool success = true;

     for (const auto & test_kv : k_tests()) {
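The tokenizer tests print raw UTF-8 to the console, and on Windows the console defaults to a legacy codepage, so multi-byte output comes out garbled; that is why the commit wires in the `console` helper from llama.cpp's common code on _WIN32 only. As a rough, assumption-level sketch of the idea behind such an init (the Win32 calls are real APIs; the actual internals of console.cpp are not reproduced here):

    // Hypothetical sketch of UTF-8 console setup on Windows. llama.cpp's
    // common/console.cpp does considerably more (colors, input handling);
    // this shows only the output side of the problem the commit addresses.
    #ifdef _WIN32
    #include <windows.h>
    #endif

    void init_utf8_console() {
    #ifdef _WIN32
        // Switch the console output codepage to UTF-8 so the multi-byte
        // sequences printed by the tokenizer tests render correctly.
        SetConsoleOutputCP(CP_UTF8);
    #endif
    }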

tests/test-tokenizer-1-llama.cpp (+127, new file)

@@ -0,0 +1,127 @@
+#include "llama.h"
+#include "common.h"
+#include "console.h"
+
+#include <cassert>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <codecvt>
+#include <map>
+#include <vector>
+#include <locale>
+
+typedef int codepoint;
+
+std::string codepoint_to_utf8(codepoint cp) {
+    std::string result;
+    if (0x00 <= cp && cp <= 0x7f) {
+        result.push_back(cp);
+    } else if (0x80 <= cp && cp <= 0x7ff) {
+        result.push_back(0xc0 | ((cp >> 6) & 0x1f));
+        result.push_back(0x80 | (cp & 0x3f));
+    } else if (0x800 <= cp && cp <= 0xffff) {
+        result.push_back(0xe0 | ((cp >> 12) & 0x0f));
+        result.push_back(0x80 | ((cp >> 6) & 0x3f));
+        result.push_back(0x80 | (cp & 0x3f));
+    } else if (0x10000 <= cp && cp <= 0x10ffff) {
+        result.push_back(0xf0 | ((cp >> 18) & 0x07));
+        result.push_back(0x80 | ((cp >> 12) & 0x3f));
+        result.push_back(0x80 | ((cp >> 6) & 0x3f));
+        result.push_back(0x80 | (cp & 0x3f));
+    } else {
+        throw std::invalid_argument("invalid codepoint");
+    }
+    return result;
+}
+
+int main(int argc, char **argv) {
+    if (argc < 2) {
+        fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
+        return 1;
+    }
+
+    const std::string fname = argv[1];
+
+    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
+
+    llama_model * model;
+    llama_context * ctx;
+
+    llama_backend_init(false);
+
+    // load the vocab
+    {
+        auto lparams = llama_context_default_params();
+
+        lparams.vocab_only = true;
+
+        model = llama_load_model_from_file(fname.c_str(), lparams);
+
+        if (model == NULL) {
+            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+            return 1;
+        }
+
+        ctx = llama_new_context_with_model(model, lparams);
+
+        if (ctx == NULL) {
+            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+            llama_free_model(model);
+            return 1;
+        }
+    }
+
+    GGML_ASSERT(llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM);
+
+#ifdef _WIN32
+    // We need this for unicode console support
+    console::init(false, false);
+    atexit([]() { console::cleanup(); });
+#endif
+
+    const int n_vocab = llama_n_vocab(ctx);
+
+    for (int i = 0; i < n_vocab; ++i) {
+        std::string str = llama_detokenize_spm(ctx, std::vector<int>(1, i));
+        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
+        std::string check = llama_detokenize_spm(ctx, tokens);
+        if (check != str) {
+            fprintf(stderr, "%s : error: token %d detokenizes to >%s<(%llu) but tokenization of this detokenizes to >%s<(%llu)\n",
+                __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
+            if(i != 3)
+                return 2;
+        }
+    }
+
+    for (codepoint cp = 0x0000; cp < 0xffff; ++cp) {
+        if (cp < 0xd800 || cp > 0xdfff) {
+            std::string str = codepoint_to_utf8(cp);
+            std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
+            std::string check = llama_detokenize_spm(ctx, tokens);
+            if (str != check) {
+                fprintf(stderr, "%s : error: codepoint %d detokenizes to >%s<(%llu) instead of >%s<(%llu)\n",
+                    __func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
+                if(cp != 0 && cp != 9601)
+                    return 3;
+            }
+        }
+    }
+    for (codepoint cp = 0x10000; cp < 0x0010ffff; ++cp) {
+        std::string str = codepoint_to_utf8(cp);
+        std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
+        std::string check = llama_detokenize_spm(ctx, tokens);
+        if (str != check) {
+            fprintf(stderr, "%s : error: codepoint %d detokenizes to >%s<(%llu) instead of >%s<(%llu)\n",
+                __func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
+            return 4;
+        }
+    }
+
+    llama_free_model(model);
+    llama_free(ctx);
+
+    llama_backend_free();
+
+    return 0;
+}
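The re-enabled test checks a round-trip invariant: every single-token piece and every Unicode scalar value (surrogates excluded) must survive tokenize -> detokenize unchanged, with deliberate escape hatches for token 3 and for codepoints 0 and 9601 (U+2581, the SentencePiece word-boundary marker). For local sanity checks of the codepoint_to_utf8 encoder above, a matching decoder is handy; the sketch below is not part of the commit, utf8_to_codepoint is a name introduced here for illustration, and validation is intentionally minimal:

    #include <cassert>
    #include <cstdio>
    #include <stdexcept>
    #include <string>

    typedef int codepoint;

    // Inverse of the test's codepoint_to_utf8: decode one UTF-8 sequence
    // starting at str[offs] and advance offs past it. It trusts the lead
    // byte, so feed it only well-formed input.
    codepoint utf8_to_codepoint(const std::string & str, size_t & offs) {
        const unsigned char b0 = str[offs];
        if (b0 < 0x80) { offs += 1; return b0; }
        if ((b0 & 0xe0) == 0xc0) {
            codepoint cp = (b0 & 0x1f) << 6 | (str[offs + 1] & 0x3f);
            offs += 2; return cp;
        }
        if ((b0 & 0xf0) == 0xe0) {
            codepoint cp = (b0 & 0x0f) << 12 | (str[offs + 1] & 0x3f) << 6
                         | (str[offs + 2] & 0x3f);
            offs += 3; return cp;
        }
        if ((b0 & 0xf8) == 0xf0) {
            codepoint cp = (b0 & 0x07) << 18 | (str[offs + 1] & 0x3f) << 12
                         | (str[offs + 2] & 0x3f) << 6 | (str[offs + 3] & 0x3f);
            offs += 4; return cp;
        }
        throw std::invalid_argument("invalid UTF-8 lead byte");
    }

    int main() {
        // "€" (U+20AC) encodes as 0xE2 0x82 0xAC
        std::string s = "\xe2\x82\xac";
        size_t offs = 0;
        codepoint cp = utf8_to_codepoint(s, offs);
        assert(cp == 0x20ac && offs == 3);
        printf("decoded U+%04X\n", cp);
        return 0;
    }

Paired with codepoint_to_utf8 from the test, this lets you assert the full encode/decode round trip over all scalar values before blaming the tokenizer.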

tests/test-tokenizer-1.cpp (-108)

This file was deleted.
