Skip to content

Commit 64f07e4

Browse files
committed
Fix endless warning for llava_hf generation
1 parent 8aaa828 commit 64f07e4

File tree

1 file changed: +2 additions, −1 deletion

lmms_eval/models/llava_hf.py

Lines changed: 2 additions & 1 deletion
```diff
@@ -313,14 +313,15 @@ def _collate(x):
                 num_beams=gen_kwargs["num_beams"],
                 max_new_tokens=gen_kwargs["max_new_tokens"],
                 use_cache=self.use_cache,
+                pad_token_id=self.tokenizer.eos_token_id,
             )
         except Exception as e:
             eval_logger.error(f"Error {e} in generating")
             cont = ""
         text_outputs = self.tokenizer.batch_decode(cont, skip_special_tokens=True)[0]
         if "1.5" in self.pretrained:
             text_outputs = text_outputs.split("ASSISTANT:")[-1].strip()
-        elif "1.6" in self.pretrained:
+        elif "mistral" in self.pretrained:
             text_outputs = text_outputs.split("[/INST]")[-1].strip()
         else:
             text_outputs = text_outputs.split("ASSISTANT:")[-1].strip()
```

0 commit comments

Comments
 (0)