Commit d2a4ef0

vocab : add ByteDance-Seed/Seed-Coder (#13423)
1 parent 15e6125 commit d2a4ef0

4 files changed: +16 lines added, 0 lines removed


convert_hf_to_gguf.py

Lines changed: 3 additions & 0 deletions
```diff
@@ -798,6 +798,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "0e9433cbbb161f89e264eb32e8e64bfe69e834973ffca5d41d3948a604a3e2a3":
             # ref: https://huggingface.co/mistral-community/pixtral-12b
             res = "pixtral"
+        if chkhsh == "d5f1dd6f980fec569fb218a81a7658ac45fc56b38c5a0adeb1c232fbe04ef5ec":
+            # ref: https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base
+            res = "seed-coder"
 
         if res is None:
             logger.warning("\n")
```
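Note: get_vocab_base_pre() identifies a pre-tokenizer by fingerprinting the token ids that the Hugging Face tokenizer produces for a fixed test string and matching the hash against known values such as the one added above. A minimal sketch of that idea, assuming the transformers library is installed and using a placeholder test string (the real script uses its own chktxt and may differ in detail):

```python
# Sketch only: the actual detection lives in convert_hf_to_gguf.py.
from hashlib import sha256
from transformers import AutoTokenizer  # assumption: transformers is available

chktxt = "Hello world!  123 can't \n\t example"  # placeholder for the script's test string

def vocab_fingerprint(model_id: str) -> str:
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    ids = tokenizer.encode(chktxt)                 # pre-tokenizer + BPE -> token ids
    return sha256(str(ids).encode()).hexdigest()   # stable hash of the id sequence

print(vocab_fingerprint("ByteDance-Seed/Seed-Coder-8B-Base"))
```

If the hash is not recognized, the converter refuses to guess and asks for the tokenizer to be registered in convert_hf_to_gguf_update.py, which is what the next file does for Seed-Coder.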

convert_hf_to_gguf_update.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -116,6 +116,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "llama4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", },
     {"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", },
     {"name": "pixtral", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistral-community/pixtral-12b", },
+    {"name": "seed-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base", },
 ]
```
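Note: this list is the registry that convert_hf_to_gguf_update.py walks to download each reference tokenizer and regenerate the chkhsh checks in convert_hf_to_gguf.py. A rough sketch of the registry shape, with a simplified TOKENIZER_TYPE enum assumed to mirror the one in the script:

```python
# Sketch of the registry pattern; the real script also downloads each tokenizer
# and rewrites the hash-to-name mapping used by convert_hf_to_gguf.py.
from enum import IntEnum, auto

class TOKENIZER_TYPE(IntEnum):  # assumed to mirror the enum in the update script
    SPM = auto()
    BPE = auto()
    WPM = auto()
    UGM = auto()

models = [
    {"name": "pixtral",    "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistral-community/pixtral-12b", },
    {"name": "seed-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base", },
]

for model in models:
    print(f"{model['name']:>12}  {model['tokt'].name}  {model['repo']}")
```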

include/llama.h

Lines changed: 1 addition & 0 deletions
```diff
@@ -112,6 +112,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32,
         LLAMA_VOCAB_PRE_TYPE_LLAMA4     = 33,
         LLAMA_VOCAB_PRE_TYPE_PIXTRAL    = 34,
+        LLAMA_VOCAB_PRE_TYPE_SEED_CODER = 35,
     };
 
     enum llama_rope_type {
```
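Note: the enum value itself is never stored in the GGUF file; the converter writes the string "seed-coder" under the pre-tokenizer metadata key, and the loader maps it to LLAMA_VOCAB_PRE_TYPE_SEED_CODER (see the llama-vocab.cpp hunk below). A small Python mirror of that mapping, with values taken from the hunk above and only neighbouring entries shown:

```python
# Illustration only: values copied from the llama.h diff above.
LLAMA_VOCAB_PRE_TYPE = {
    "bailingmoe": 32,  # LLAMA_VOCAB_PRE_TYPE_BAILINGMOE
    "llama4":     33,  # LLAMA_VOCAB_PRE_TYPE_LLAMA4
    "pixtral":    34,  # LLAMA_VOCAB_PRE_TYPE_PIXTRAL
    "seed-coder": 35,  # LLAMA_VOCAB_PRE_TYPE_SEED_CODER (added by this commit)
}

def pre_type_for(tokenizer_pre: str) -> int:
    try:
        return LLAMA_VOCAB_PRE_TYPE[tokenizer_pre]
    except KeyError:
        raise ValueError(f"unknown pre-tokenizer type: '{tokenizer_pre}'") from None

assert pre_type_for("seed-coder") == 35
```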

src/llama-vocab.cpp

Lines changed: 11 additions & 0 deletions
```diff
@@ -415,6 +415,13 @@ struct llm_tokenizer_bpe : llm_tokenizer {
                     "'(?:[sSdDmMtT]|[lL][lL]|[vV][eE]|[rR][eE])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]|\\s+(?!\\S)|\\s+",
                 };
                 break;
+            case LLAMA_VOCAB_PRE_TYPE_SEED_CODER:
+                regex_exprs = {
+                    // original regex from tokenizer.json
+                    // "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1}| ?[^\\s\\p{L}\\p{N}\r\n]+|\\s*[\r\n]+|\\s+(?!\\S)|\\s+"
+                    "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1}| ?[^\\s\\p{L}\\p{N}\\r\\n]+|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
+                };
+                break;
             default:
                 // default regex for BPE tokenization pre-processing
                 regex_exprs = {
@@ -1634,6 +1641,10 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 tokenizer_pre == "bailingmoe") {
             pre_type = LLAMA_VOCAB_PRE_TYPE_BAILINGMOE;
             clean_spaces = false;
+        } else if (
+                tokenizer_pre == "seed-coder") {
+            pre_type = LLAMA_VOCAB_PRE_TYPE_SEED_CODER;
+            clean_spaces = false;
         } else {
             throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
         }
```
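Note: to preview how the new pre-tokenizer splits text before the BPE merges run, the same expression can be tried from Python with the third-party regex package (which, like llama.cpp's own engine, understands the \p{L}/\p{N} property classes); the sample text below is arbitrary:

```python
# Requires `pip install regex`; the built-in `re` module does not support \p{...}.
import regex

SEED_CODER_PRE = (
    r"(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])"
    r"|[^\r\n\p{L}\p{N}]?\p{L}+"
    r"|\p{N}{1}"                 # digits are emitted one at a time
    r"| ?[^\s\p{L}\p{N}\r\n]+"
    r"|\s*[\r\n]+"
    r"|\s+(?!\S)"
    r"|\s+"
)

text = "def add(a, b):\n    return a + b  # it's 42\n"
print(regex.findall(SEED_CODER_PRE, text))
# Each fragment is then handed to the BPE merge step; llama.cpp applies the same
# expression through its own unicode regex engine rather than the `regex` package.
```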
