
Validate special token ids are in range when loading GGUF model #3635


Merged
2 changes: 1 addition & 1 deletion convert-baichuan-hf-to-gguf.py
@@ -224,7 +224,7 @@ def parse_args() -> argparse.Namespace:
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)

-special_vocab = gguf.SpecialVocab(dir_model)
+special_vocab = gguf.SpecialVocab(dir_model, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)

 # TENSORS
2 changes: 1 addition & 1 deletion convert-bloom-hf-to-gguf.py
@@ -129,7 +129,7 @@ def parse_args() -> argparse.Namespace:
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)

-special_vocab = gguf.SpecialVocab(dir_model, load_merges=True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)

 # TENSORS
2 changes: 1 addition & 1 deletion convert-falcon-hf-to-gguf.py
@@ -145,7 +145,7 @@ def parse_args() -> argparse.Namespace:
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)

-special_vocab = gguf.SpecialVocab(dir_model, load_merges = True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)

 # TENSORS
2 changes: 1 addition & 1 deletion convert-gptneox-hf-to-gguf.py
@@ -134,7 +134,7 @@ def parse_args() -> argparse.Namespace:
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)

-special_vocab = gguf.SpecialVocab(dir_model, load_merges = True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)

 # TENSORS
4 changes: 3 additions & 1 deletion convert-llama-ggml-to-gguf.py
@@ -388,7 +388,9 @@ def handle_metadata(cfg, hp):
         cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir,
         cfg.vocabtype )
     # FIXME: Respect cfg.vocab_dir?
-    svocab = gguf.SpecialVocab(cfg.model_metadata_dir)
+    svocab = gguf.SpecialVocab(cfg.model_metadata_dir,
+        load_merges = cfg.vocabtype == 'bpe',
+        n_vocab = vocab.vocab_size)
     convert.check_vocab_size(params, vocab)
     return (params, vocab, svocab)
2 changes: 1 addition & 1 deletion convert-mpt-hf-to-gguf.py
@@ -139,7 +139,7 @@ def parse_args() -> argparse.Namespace:
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)

-special_vocab = gguf.SpecialVocab(dir_model, load_merges = True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)

 # TENSORS
2 changes: 1 addition & 1 deletion convert-refact-hf-to-gguf.py
@@ -150,7 +150,7 @@ def parse_args() -> argparse.Namespace:
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)

-special_vocab = gguf.SpecialVocab(dir_model, load_merges=True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)

 # TENSORS
2 changes: 1 addition & 1 deletion convert-starcoder-hf-to-gguf.py
@@ -122,7 +122,7 @@ def parse_args() -> argparse.Namespace:
 gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)

-special_vocab = gguf.SpecialVocab(dir_model, load_merges = True)
+special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)

 # TENSORS
13 changes: 9 additions & 4 deletions convert.py
@@ -369,7 +369,7 @@ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Path | None) -> None:
         expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
         actual_ids = sorted(added_tokens.values())
         if expected_ids != actual_ids:
-            raise Exception(f"Expected added token IDs to be sequential and start at {len(added_tokens)}; got {actual_ids}")
+            raise Exception(f"Expected added token IDs to be sequential and start at {vocab_size}; got {actual_ids}")

         items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
         self.added_tokens_list = [text for (text, idx) in items]
@@ -1159,10 +1159,13 @@ def main(args_in: list[str] | None = None) -> None:

     vocab: Vocab
     if args.vocab_only:
-        assert args.outfile, "need --outfile if using --vocab-only"
+        if not args.outfile:
+            raise ValueError("need --outfile if using --vocab-only")
         # FIXME: Try to respect vocab_dir somehow?
         vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype)
-        special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent, load_merges = args.vocabtype == 'bpe')
+        special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent,
+            load_merges = args.vocabtype == 'bpe',
+            n_vocab = vocab.vocab_size)
         outfile = args.outfile
         OutputFile.write_vocab_only(outfile, params, vocab, special_vocab)
         print(f"Wrote {outfile}")
@@ -1174,7 +1177,9 @@ def main(args_in: list[str] | None = None) -> None:
     vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
     vocab = load_vocab(vocab_dir, args.vocabtype)
     # FIXME: Try to respect vocab_dir somehow?
-    special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent, load_merges = args.vocabtype == 'bpe')
+    special_vocab = gguf.SpecialVocab(model_plus.paths[0].parent,
+        load_merges = args.vocabtype == 'bpe',
+        n_vocab = vocab.vocab_size)

     model = model_plus.model
     model = convert_model_names(model, params)
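
Note on the convert.py change from `assert` to an explicit `raise ValueError`: Python strips `assert` statements when run with the -O flag, so the old guard could silently disappear. A minimal standalone sketch of the difference (the argument parser below is illustrative, not convert.py's real one):

# Sketch: why an explicit raise is safer than assert for CLI validation.
# Under `python -O`, assert statements are compiled out entirely, so
# `assert args.outfile, "..."` would let a missing --outfile slip through.
import argparse

def main(argv: list[str] | None = None) -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--vocab-only", action="store_true")
    parser.add_argument("--outfile")
    args = parser.parse_args(argv)
    if args.vocab_only:
        if not args.outfile:  # survives python -O, unlike assert
            raise ValueError("need --outfile if using --vocab-only")

main(["--vocab-only", "--outfile", "out.gguf"])  # ok
# main(["--vocab-only"])  # raises ValueError even under python -O
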
36 changes: 25 additions & 11 deletions gguf-py/gguf/gguf.py
@@ -968,12 +968,15 @@ class SpecialVocab:
     merges: list[str] = []
     special_token_types: tuple[str, ...] = ('bos', 'eos', 'unk', 'sep', 'pad')
     special_token_ids: dict[str, int] = {}
+    n_vocab: int | None = None

     def __init__(
         self, path: str | os.PathLike[str], load_merges: bool = False,
         special_token_types: tuple[str, ...] | None = None,
+        n_vocab: int | None = None,
     ):
         self.special_token_ids = {}
+        self.n_vocab = n_vocab
         self.load_merges = load_merges
         if special_token_types is not None:
             self.special_token_types = special_token_types
@@ -983,6 +986,16 @@ def _load(self, path: Path) -> None:
         if not self._try_load_from_tokenizer_json(path):
             self._try_load_from_config_json(path)

+    def _set_special_token(self, typ: str, tid: Any):
+        if not isinstance(tid, int) or tid < 0:
+            return
+        if self.n_vocab is None or tid < self.n_vocab:
+            self.special_token_ids[typ] = tid
+            return
+        print(f'gguf: WARNING: Special token type {typ}, id {tid} out of range, must be under {self.n_vocab} - skipping',
+              file = sys.stderr)
+
+
     def _try_load_from_tokenizer_json(self, path: Path) -> bool:
         tokenizer_file = path / 'tokenizer.json'
         if not tokenizer_file.is_file():
@@ -1010,10 +1023,11 @@ def _try_load_from_tokenizer_json(self, path: Path) -> bool:
                     tc_content = entry_content
                 else:
                     continue
-            for maybe_token_id in (atok.get('id') for atok in added_tokens if atok.get('content') == tc_content):
-                if isinstance(maybe_token_id, int) and maybe_token_id >= 0:
-                    self.special_token_ids[typ] = maybe_token_id
-                break
+            # We only need the first match here.
+            maybe_token_id = next((
+                atok.get('id') for atok in added_tokens
+                if atok.get('content') == tc_content), None)
+            self._set_special_token(typ, maybe_token_id)
         return True

     def _try_load_from_config_json(self, path: Path) -> bool:
@@ -1023,21 +1037,21 @@
         with open(config_file, encoding = 'utf-8') as f:
             config = json.load(f)
         for typ in self.special_token_types:
-            maybe_token_id = config.get(f'{typ}_token_id')
-            if isinstance(maybe_token_id, int) and maybe_token_id >= 0:
-                self.special_token_ids[typ] = maybe_token_id
+            self._set_special_token(typ, config.get(f'{typ}_token_id'))
         return True

-    def add_to_gguf(self, gw: GGUFWriter) -> None:
+    def add_to_gguf(self, gw: GGUFWriter, quiet: bool = False) -> None:
         if len(self.merges) > 0:
-            print(f'gguf: Adding {len(self.merges)} merge(s).')
+            if not quiet:
+                print(f'gguf: Adding {len(self.merges)} merge(s).')
             gw.add_token_merges(self.merges)
         for typ, tokid in self.special_token_ids.items():
             handler: Callable[[int], None] | None = getattr(gw, f'add_{typ}_token_id', None)
             if handler is None:
-                print(f'gguf: WARNING: No handler for special token type {typ} with id {tokid} - skipping')
+                print(f'gguf: WARNING: No handler for special token type {typ} with id {tokid} - skipping', file = sys.stderr)
                 continue
-            print(f'gguf: Setting special token type {typ} to {tokid}')
+            if not quiet:
+                print(f'gguf: Setting special token type {typ} to {tokid}')
             handler(tokid)

     def __repr__(self) -> str:
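
With `n_vocab` supplied, `_set_special_token` now drops out-of-range ids with a warning instead of writing them into the GGUF file. A minimal sketch of that behavior via the config.json fallback path (used when no tokenizer.json is present), assuming gguf-py is importable; the directory, token ids, and vocab size are invented for illustration:

# Sketch: demonstrate _set_special_token clamping via the config.json fallback.
import json
import tempfile
from pathlib import Path

import gguf

with tempfile.TemporaryDirectory() as tmp:
    # bos is in range; eos is deliberately past the end of the vocab
    (Path(tmp) / 'config.json').write_text(
        json.dumps({'bos_token_id': 1, 'eos_token_id': 999999}))
    sv = gguf.SpecialVocab(tmp, n_vocab = 32000)
    print(sv.special_token_ids)
    # Expected: {'bos': 1}, plus a stderr warning that the eos id 999999
    # is out of range and was skipped.
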
37 changes: 28 additions & 9 deletions llama.cpp
@@ -2235,15 +2235,35 @@ static void llm_load_vocab(
     if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
         vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
     } else {
-        vocab.linefeed_id = llama_tokenize_internal(vocab, "\u010A", false)[0];
+        const std::vector<int> ids = llama_tokenize_internal(vocab, "\u010A", false);
+        GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
+        vocab.linefeed_id = ids[0];
     }

     // special tokens
-    GGUF_GET_KEY(ctx, vocab.special_bos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_BOS_ID));
-    GGUF_GET_KEY(ctx, vocab.special_eos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_EOS_ID));
-    GGUF_GET_KEY(ctx, vocab.special_unk_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_UNK_ID));
-    GGUF_GET_KEY(ctx, vocab.special_sep_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_SEP_ID));
-    GGUF_GET_KEY(ctx, vocab.special_pad_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_PAD_ID));
+    {
+        const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
+            { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id },
+            { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id },
+            { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id },
+            { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id },
+            { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id },
+        };
+        for (const auto & it : special_token_types) {
+            const std::string & key = kv(std::get<0>(it));
+            int32_t & id = std::get<1>(it), old_id = id;
+
+            GGUF_GET_KEY(ctx, id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, key);
+            // Must be >= -1 and < vocab size. Since the key is unsigned, -1
+            // can only come from the default value, so there's no point in
+            // validating that.
+            if (size_t(id + 1) > vocab.id_to_token.size()) {
+                LLAMA_LOG_WARN("%s: bad special token: '%s' = %d, using default id %d\n",
+                    __func__, key.c_str(), id, old_id);
+                id = old_id;
+            }
+        }
+    }

     // build special tokens cache
     {
@@ -6084,11 +6104,10 @@ static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) {
 }

 static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
+    static const char * hex = "0123456789ABCDEF";
     switch (llama_vocab_get_type(vocab)) {
         case LLAMA_VOCAB_TYPE_SPM: {
-            char buf[7];
-            int result = snprintf(buf, sizeof(buf), "<0x%02X>", ch);
-            GGML_ASSERT(0 <= result && result < 7);
+            const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
             return vocab.token_to_id.at(buf);
         }
         case LLAMA_VOCAB_TYPE_BPE: {
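
The loader-side check packs two conditions into one comparison: `size_t(id + 1) > vocab.id_to_token.size()` rejects any id outside [-1, n_vocab), because -1 + 1 casts to unsigned 0 (the "unset" default passes) while any other negative id wraps to a huge unsigned value and fails. A sketch of the equivalent logic with the two halves spelled out; the function and names are illustrative, not part of llama.cpp:

# Sketch of the special-token range check in llm_load_vocab.
def validate_special_token(tok_id: int, old_id: int, n_vocab: int) -> int:
    # The C++ side does `size_t(id + 1) > n_vocab` in a single comparison;
    # written out, that is: keep -1 (unset default) or any id < n_vocab.
    if not (-1 <= tok_id < n_vocab):
        print(f"bad special token id {tok_id}, using default id {old_id}")
        return old_id
    return tok_id

assert validate_special_token(-1, -1, 32000) == -1      # unset default kept
assert validate_special_token(2, -1, 32000) == 2        # in range
assert validate_special_token(999999, -1, 32000) == -1  # out of range -> default
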