Skip to content

Commit 4e87962

Browse files
authored
mtmd : fix glm-edge redundant token count (#13139)
* mtmd : fix glm-edge redundant token count * fix chat template * temporary disable GLMEdge test chat tmpl
1 parent fb0471d commit 4e87962

File tree

3 files changed

+11
-26
lines changed

3 files changed

+11
-26
lines changed

examples/llava/mtmd.cpp

+1-9
Original file line numberDiff line numberDiff line change
@@ -203,9 +203,6 @@ int32_t mtmd_tokenize(mtmd_context * ctx,
203203
}
204204

205205
// llava-1.5, llava-1.6, Yi-VL, Yi-34B, granite: don't need to add prefix and suffix
206-
// for glm-edge, we don't need to add because the tokens are already in the returned embeddings
207-
208-
// TODO @ngxson : glm-edge : remove BOI / EOI tokens embeddings, decode them as normal tokens
209206

210207
std::vector<std::string> parts = string_split_str(prompt_modified, ctx->image_marker);
211208
output.clear();
@@ -246,7 +243,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx,
246243
};
247244

248245
for (const auto & part : parts) {
249-
//printf("tokenizing part: %s\n", part.c_str());
246+
// printf("tokenizing part: %s\n", part.c_str());
250247
bool add_bos = &parts.front() == &part;
251248
auto tokens = mtmd_tokenize_text_internal(vocab, part, text.add_special && add_bos, text.parse_special);
252249
if (tokens.empty()) {
@@ -338,11 +335,6 @@ int32_t mtmd_tokenize(mtmd_context * ctx,
338335
LOG_DBG("image_tokens->ny = %d\n", image_tokens->ny);
339336
LOG_DBG("batch_f32 size = %d\n", (int)image_tokens->batch_f32.entries.size());
340337

341-
if (clip_is_glm(ctx->ctx_clip)) {
342-
// glm-edge
343-
image_tokens->nx += 2; // add 2 for the begin_of_image and end_of_image token embeddings
344-
}
345-
346338
mtmd_input_chunk chunk{
347339
MTMD_INPUT_CHUNK_TYPE_IMAGE,
348340
{},

src/llama-chat.cpp

+1-9
Original file line numberDiff line numberDiff line change
@@ -447,7 +447,7 @@ int32_t llm_chat_apply_template(
447447
if (add_ass) {
448448
ss << "<|assistant|>";
449449
}
450-
} else if (tmpl == LLM_CHAT_TEMPLATE_CHATGLM_4) {
450+
} else if (tmpl == LLM_CHAT_TEMPLATE_CHATGLM_4 || tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
451451
ss << "[gMASK]" << "<sop>";
452452
for (auto message : chat) {
453453
std::string role(message->role);
@@ -456,14 +456,6 @@ int32_t llm_chat_apply_template(
456456
if (add_ass) {
457457
ss << "<|assistant|>";
458458
}
459-
} else if (tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
460-
for (auto message : chat) {
461-
std::string role(message->role);
462-
ss << "<|" << role << "|>" << "\n" << message->content;
463-
}
464-
if (add_ass) {
465-
ss << "<|assistant|>";
466-
}
467459
} else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) {
468460
// MiniCPM-3B-OpenHermes-2.5-v2-GGUF
469461
for (auto message : chat) {

tests/test-chat-template.cpp

+9-8
Original file line numberDiff line numberDiff line change
@@ -187,14 +187,15 @@ int main(void) {
187187
/* .bos_token= */ "",
188188
/* .eos_token= */ "",
189189
},
190-
{
191-
/* .name= */ "GLMEdge",
192-
/* .template_str= */ "{% for item in messages %}{% if item['role'] == 'system' %}<|system|>\n{{ item['content'] }}{% elif item['role'] == 'user' %}<|user|>\n{{ item['content'] }}{% elif item['role'] == 'assistant' %}<|assistant|>\n{{ item['content'] }}{% endif %}{% endfor %}<|assistant|>",
193-
/* .expected_output= */ "<|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>",
194-
/* .expected_output_jinja= */ "<|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>",
195-
/* .bos_token= */ "",
196-
/* .eos_token= */ "",
197-
},
190+
// TODO @ngxson : GLMEdge produces poor result without `[gMASK]<sop>`, so we're temporarily using GLM4 template for it. We should fix this in the future.
191+
// {
192+
// /* .name= */ "GLMEdge",
193+
// /* .template_str= */ "{% for item in messages %}{% if item['role'] == 'system' %}<|system|>\n{{ item['content'] }}{% elif item['role'] == 'user' %}<|user|>\n{{ item['content'] }}{% elif item['role'] == 'assistant' %}<|assistant|>\n{{ item['content'] }}{% endif %}{% endfor %}<|assistant|>",
194+
// /* .expected_output= */ "<|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>",
195+
// /* .expected_output_jinja= */ "<|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>",
196+
// /* .bos_token= */ "",
197+
// /* .eos_token= */ "",
198+
// },
198199
{
199200
/* .name= */ "MiniCPM-3B-OpenHermes-2.5-v2-GGUF",
200201
/* .template_str= */ U8C("{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}"),

0 commit comments

Comments
 (0)