File tree: decoder_only/vision_language — 2 files changed, +0 −16 lines

@@ -318,21 +318,6 @@
         use_tokenizer_eos=True,
         patch_hf_runner=model_utils.internvl_patch_hf_runner,
     ),
-    "llama4": VLMTestInfo(
-        models=["meta-llama/Llama-4-Scout-17B-16E-Instruct"],
-        prompt_formatter=lambda img_prompt: f"<|begin_of_text|><|header_start|>user<|header_end|>\n\n{img_prompt}<|eot|><|header_start|>assistant<|header_end|>\n\n",  # noqa: E501
-        img_idx_to_prompt=lambda _: "<|image|>",
-        test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
-        distributed_executor_backend="mp",
-        image_size_factors=[(.25, 0.5, 1.0)],
-        hf_model_kwargs={"device_map": "auto"},
-        max_model_len=8192,
-        max_num_seqs=4,
-        dtype="bfloat16",
-        auto_cls=AutoModelForImageTextToText,
-        tensor_parallel_size=4,
-        marks=multi_gpu_marks(num_gpus=4),
-    ),
     "llava_next": VLMTestInfo(
         models=["llava-hf/llava-v1.6-mistral-7b-hf"],
         test_type=(VLMTestType.IMAGE, VLMTestType.CUSTOM_INPUTS),
@@ -361,7 +361,6 @@ def check_available_online(
         tokenizer="facebook/bart-base",
         trust_remote_code=True),  # noqa: E501
     "MllamaForConditionalGeneration": _HfExamplesInfo("meta-llama/Llama-3.2-11B-Vision-Instruct"),  # noqa: E501
-    "Llama4ForConditionalGeneration": _HfExamplesInfo("meta-llama/Llama-4-Scout-17B-16E-Instruct"),  # noqa: E501
     "WhisperForConditionalGeneration": _HfExamplesInfo("openai/whisper-large-v3"),  # noqa: E501
 }
