Skip to content

Commit 717d579

Browse files
authored
Merge pull request #13 from huggingface/mllama-converter-updates
Converter: model_max_length, avoid null chat_template
2 parents ca94ea0 + 5d98a72 commit 717d579

File tree

1 file changed: +3 additions, −1 deletion

src/transformers/models/mllama/convert_mllama_weights_to_hf.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -463,13 +463,15 @@ def __init__(self, vocab_file, num_reserved_special_tokens=256, chat_template=No
463463
self.additional_special_tokens = special_tokens
464464
tokenizer = self.converted()
465465

466+
instruct_kwargs = {"chat_template": chat_template} if instruct else {}
466467
self.tokenizer = PreTrainedTokenizerFast(
467468
tokenizer_object=tokenizer,
468469
bos_token="<|begin_of_text|>",
469470
eos_token="<|end_of_text|>" if not instruct else "<|eot_id|>",
470471
pad_token="<|finetune_right_pad_id|>",
471-
chat_template=chat_template if instruct else None,
472472
model_input_names=["input_ids", "attention_mask"],
473+
model_max_length=131072,
474+
**instruct_kwargs,
473475
)
474476

475477

Commit comments: 0