From 3b3827c2b0ff2e44d033d424a3cd5c7086037564 Mon Sep 17 00:00:00 2001
From: DarkLight1337
Date: Sat, 26 Apr 2025 03:09:35 +0000
Subject: [PATCH 1/5] [Bugfix] Fix standard models tests

Signed-off-by: DarkLight1337
---
 .../decoder_only/language/test_models.py      | 48 ++++++++++---------
 tests/models/registry.py                      | 22 +++------
 2 files changed, 32 insertions(+), 38 deletions(-)

diff --git a/tests/models/decoder_only/language/test_models.py b/tests/models/decoder_only/language/test_models.py
index 85714b85e7e..38f31b09318 100644
--- a/tests/models/decoder_only/language/test_models.py
+++ b/tests/models/decoder_only/language/test_models.py
@@ -34,50 +34,54 @@
 # @maybe_test_rocm_aiter
 @pytest.mark.parametrize(
-    "model_arch",
+    "model",
     [
         pytest.param(
-            "BloomForCausalLM",  # testing alibi slopes
+            "bigscience/bloom-560m",  # bloom - testing alibi slopes
             marks=[pytest.mark.core_model, pytest.mark.cpu_model],
         ),
         pytest.param(
-            "GPT2LMHeadModel",  # gpt2
+            "openai-community/gpt2",  # gpt2
             marks=[pytest.mark.core_model, pytest.mark.cpu_model],
         ),
-        pytest.param("GPTJForCausalLM"),
-        pytest.param("GPTBigCodeForCausalLM"),
-        pytest.param("GPTNeoXForCausalLM"),
+        pytest.param("Milos/slovak-gpt-j-405M"),  # gptj
+        pytest.param("bigcode/tiny_starcoder_py"),  # gpt_bigcode
+        pytest.param("EleutherAI/pythia-70m"),  # gpt_neox
         pytest.param(
-            "GemmaForCausalLM",  # gemma
+            "google/gemma-1.1-2b-it",  # gemma
             marks=[pytest.mark.core_model, pytest.mark.cpu_model],
         ),
-        pytest.param("GlmForCausalLM"),
         pytest.param(
-            "LlamaForCausalLM",
+            "THUDM/chatglm3-6b",  # chatglm (text-only)
+        ),
+        pytest.param(
+            "meta-llama/Llama-3.2-1B-Instruct",  # llama
             marks=[pytest.mark.core_model, pytest.mark.cpu_model],
         ),
         pytest.param(
-            "MiniCPM3ForCausalLM",
+            "openbmb/MiniCPM3-4B",
             # fused_moe not supported on CPU
             marks=[pytest.mark.core_model],
         ),
         pytest.param(
-            "OPTForCausalLM",
+            "facebook/opt-125m",  # opt
             marks=[pytest.mark.core_model, pytest.mark.cpu_model],
         ),
         pytest.param(
-            "PhiForCausalLM",
+            "microsoft/phi-2",  # phi
             marks=[pytest.mark.core_model],
         ),
-        pytest.param("QWenLMHeadModel", ),
         pytest.param(
-            "Qwen2ForCausalLM",
+            "Qwen/Qwen-7B",  # qwen (text-only)
+        ),
+        pytest.param(
+            "Qwen/Qwen2.5-0.5B-Instruct",  # qwen2
             marks=[pytest.mark.core_model],
         ),
-        pytest.param("StableLmForCausalLM"),
-        pytest.param("Starcoder2ForCausalLM"),
+        pytest.param("stabilityai/stablelm-3b-4e1t"),  # stablelm
+        pytest.param("bigcode/starcoder2-3b"),  # starcoder2
         pytest.param(
-            "MixtralForCausalLM",
+            "ehristoforu/Falcon3-MoE-2x7B-Insruct",  # mixtral
             marks=[pytest.mark.cpu_model],
         )
     ])
@@ -86,11 +90,13 @@
 @pytest.mark.parametrize("num_logprobs", [5])
 @pytest.mark.parametrize(
     "use_rocm_aiter", [True, False] if current_platform.is_rocm() else [False])
-def test_models(hf_runner, vllm_runner, example_prompts, model_arch: str,
+def test_models(hf_runner, vllm_runner, example_prompts, model: str,
                 dtype: str, max_tokens: int, num_logprobs: int,
                 use_rocm_aiter: bool, monkeypatch) -> None:
 
-    model = HF_EXAMPLE_MODELS.get_hf_info(model_arch).default
+    model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
+    model_info.check_available_online(on_fail="skip")
+    model_info.check_transformers_version(on_fail="skip")
 
     if model in REQUIRES_V0:
         monkeypatch.setenv("VLLM_USE_V1", "0")
@@ -105,10 +111,6 @@ def test_models(hf_runner, vllm_runner, example_prompts, model_arch: str,
         pytest.skip(f"Skipping '{model}' model test with AITER kernel.")
 
     with hf_runner(model, dtype=dtype) as hf_model:
-        if model.startswith("THUDM/chatglm3"):
-            hf_model.model.get_output_embeddings = lambda: \
-                hf_model.model.transformer.output_layer
-
         hf_outputs = hf_model.generate_greedy_logprobs_limit(
             example_prompts, max_tokens, num_logprobs)
 
diff --git a/tests/models/registry.py b/tests/models/registry.py
index 608f4652e4d..ce9b48febf7 100644
--- a/tests/models/registry.py
+++ b/tests/models/registry.py
@@ -124,7 +124,7 @@ def check_available_online(
     "BloomForCausalLM": _HfExamplesInfo("bigscience/bloomz-1b1"),
     "ChatGLMModel": _HfExamplesInfo("THUDM/chatglm3-6b",
                                     trust_remote_code=True,
-                                    max_transformers_version="4.51.1"),
+                                    max_transformers_version="4.48"),
     "ChatGLMForConditionalGeneration": _HfExamplesInfo("thu-coai/ShieldLM-6B-chatglm3",  # noqa: E501
                                                        trust_remote_code=True),
     "CohereForCausalLM": _HfExamplesInfo("CohereForAI/c4ai-command-r-v01",
@@ -144,8 +144,7 @@ def check_available_online(
     "FalconForCausalLM": _HfExamplesInfo("tiiuae/falcon-7b"),
     "GemmaForCausalLM": _HfExamplesInfo("google/gemma-2b"),
     "Gemma2ForCausalLM": _HfExamplesInfo("google/gemma-2-9b"),
-    "Gemma3ForCausalLM": _HfExamplesInfo("google/gemma-3-1b-it",
-                                         min_transformers_version="4.50"),
+    "Gemma3ForCausalLM": _HfExamplesInfo("google/gemma-3-1b-it"),
     "GlmForCausalLM": _HfExamplesInfo("THUDM/glm-4-9b-chat-hf"),
     "Glm4ForCausalLM": _HfExamplesInfo(
         "THUDM/GLM-4-32B-0414",
@@ -158,8 +157,7 @@ def check_available_online(
     "GPTNeoXForCausalLM": _HfExamplesInfo("EleutherAI/pythia-160m"),
     "GraniteForCausalLM": _HfExamplesInfo("ibm/PowerLM-3b"),
     "GraniteMoeForCausalLM": _HfExamplesInfo("ibm/PowerMoE-3b"),
-    "GraniteMoeSharedForCausalLM": _HfExamplesInfo("ibm-research/moe-7b-1b-active-shared-experts",  # noqa: E501
-                                                   min_transformers_version="4.49"),  # noqa: E501
+    "GraniteMoeSharedForCausalLM": _HfExamplesInfo("ibm-research/moe-7b-1b-active-shared-experts"),  # noqa: E501
     "Grok1ModelForCausalLM": _HfExamplesInfo("hpcai-tech/grok-1",
                                              trust_remote_code=True),
     "InternLMForCausalLM": _HfExamplesInfo("internlm/internlm-chat-7b",
@@ -236,8 +234,7 @@ def check_available_online(
     "XverseForCausalLM": _HfExamplesInfo("xverse/XVERSE-7B-Chat",
                                          is_available_online=False,
                                          trust_remote_code=True),
-    "Zamba2ForCausalLM": _HfExamplesInfo("Zyphra/Zamba2-7B-instruct",
-                                         min_transformers_version="4.49"),
+    "Zamba2ForCausalLM": _HfExamplesInfo("Zyphra/Zamba2-7B-instruct"),
     # [Encoder-decoder]
     "BartModel": _HfExamplesInfo("facebook/bart-base"),
     "BartForConditionalGeneration": _HfExamplesInfo("facebook/bart-large-cnn"),
@@ -280,9 +277,7 @@ def check_available_online(
     "BertForSequenceClassification": _HfExamplesInfo("cross-encoder/ms-marco-MiniLM-L-6-v2"),  # noqa: E501
     "RobertaForSequenceClassification": _HfExamplesInfo("cross-encoder/quora-roberta-base"),  # noqa: E501
     "XLMRobertaForSequenceClassification": _HfExamplesInfo("BAAI/bge-reranker-v2-m3"),  # noqa: E501
-    "ModernBertForSequenceClassification":
-    _HfExamplesInfo("Alibaba-NLP/gte-reranker-modernbert-base",
-                    min_transformers_version="4.49"),
+    "ModernBertForSequenceClassification": _HfExamplesInfo("Alibaba-NLP/gte-reranker-modernbert-base"),  # noqa: E501
 }
 
 _MULTIMODAL_EXAMPLE_MODELS = {
@@ -298,8 +293,7 @@ def check_available_online(
                                                  transformers_version_reason="HF model is not compatible.",  # noqa: E501
                                                  hf_overrides={"architectures": ["DeepseekVLV2ForCausalLM"]}),  # noqa: E501
     "FuyuForCausalLM": _HfExamplesInfo("adept/fuyu-8b"),
-    "Gemma3ForConditionalGeneration": _HfExamplesInfo("google/gemma-3-4b-it",
-                                                      min_transformers_version="4.50"),
+    "Gemma3ForConditionalGeneration": _HfExamplesInfo("google/gemma-3-4b-it"),
     "GLM4VForCausalLM": _HfExamplesInfo("THUDM/glm-4v-9b",
                                        trust_remote_code=True,
                                        hf_overrides={"architectures": ["GLM4VForCausalLM"]}),  # noqa: E501
@@ -335,7 +329,6 @@ def check_available_online(
                                         extras={"2.6": "openbmb/MiniCPM-V-2_6"},  # noqa: E501
                                         trust_remote_code=True),
     "Mistral3ForConditionalGeneration": _HfExamplesInfo("mistralai/Mistral-Small-3.1-24B-Instruct-2503",  # noqa: E501
-                                                        min_transformers_version="4.50",  # noqa: E501
                                                         extras={"fp8": "nm-testing/Mistral-Small-3.1-24B-Instruct-2503-FP8-dynamic"}),  # noqa: E501
     "MolmoForCausalLM": _HfExamplesInfo("allenai/Molmo-7B-D-0924",
                                         max_transformers_version="4.48",
@@ -361,8 +354,7 @@ def check_available_online(
                                   hf_overrides={"architectures": ["QwenVLForConditionalGeneration"]}),  # noqa: E501
     "Qwen2AudioForConditionalGeneration": _HfExamplesInfo("Qwen/Qwen2-Audio-7B-Instruct"),  # noqa: E501
     "Qwen2VLForConditionalGeneration": _HfExamplesInfo("Qwen/Qwen2-VL-2B-Instruct"),  # noqa: E501
-    "Qwen2_5_VLForConditionalGeneration": _HfExamplesInfo("Qwen/Qwen2.5-VL-3B-Instruct",  # noqa: E501
-                                                          min_transformers_version="4.49"),  # noqa: E501
+    "Qwen2_5_VLForConditionalGeneration": _HfExamplesInfo("Qwen/Qwen2.5-VL-3B-Instruct"),  # noqa: E501
     "Qwen2_5OmniModel": _HfExamplesInfo("Qwen/Qwen2.5-Omni-7B",  # noqa: E501
                                         min_transformers_version="4.52"),  # noqa: E501
     "SkyworkR1VChatModel": _HfExamplesInfo("Skywork/Skywork-R1V-38B"),

From 5c388b7f9ff4007cbf48f7f8654528006a808a2f Mon Sep 17 00:00:00 2001
From: DarkLight1337
Date: Sat, 26 Apr 2025 03:21:24 +0000
Subject: [PATCH 2/5] Update registry

Signed-off-by: DarkLight1337
---
 tests/distributed/test_pipeline_parallel.py   |  6 ++---
 .../decoder_only/language/test_models.py      |  2 +-
 tests/models/registry.py                      | 25 ++++++++++++-------
 3 files changed, 20 insertions(+), 13 deletions(-)

diff --git a/tests/distributed/test_pipeline_parallel.py b/tests/distributed/test_pipeline_parallel.py
index 05e30f855ce..1807fab7c0c 100644
--- a/tests/distributed/test_pipeline_parallel.py
+++ b/tests/distributed/test_pipeline_parallel.py
@@ -161,12 +161,12 @@ def iter_params(self, model_id: str):
     "deepseek-ai/DeepSeek-V2-Lite-Chat": PPTestSettings.fast(),
     "LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct": PPTestSettings.fast(),
     "tiiuae/falcon-7b": PPTestSettings.fast(),
-    "google/gemma-2b": PPTestSettings.fast(),
+    "google/gemma-1.1-2b-it": PPTestSettings.fast(),
     "google/gemma-2-9b": PPTestSettings.fast(),
     "gpt2": PPTestSettings.fast(),
     "bigcode/starcoder": PPTestSettings.fast(),
     "EleutherAI/gpt-j-6b": PPTestSettings.fast(),
-    "EleutherAI/pythia-12b": PPTestSettings.fast(),
+    "EleutherAI/pythia-1.4b": PPTestSettings.fast(),
     "ibm/PowerLM-3b": PPTestSettings.fast(),
     "ibm/PowerMoE-3b": PPTestSettings.fast(),
     # Uses Llama
@@ -195,7 +195,7 @@ def iter_params(self, model_id: str):
     "microsoft/Phi-3-small-8k-instruct": PPTestSettings.fast(),
     "microsoft/Phi-3.5-MoE-instruct": PPTestSettings.detailed(multi_node_only=True, load_format="dummy"),  # noqa: E501
     "Qwen/Qwen-7B-Chat": PPTestSettings.fast(),
-    "Qwen/Qwen2-7B-Instruct": PPTestSettings.fast(),
+    "Qwen/Qwen2.5-1.5B-Instruct": PPTestSettings.fast(),
     "Qwen/Qwen1.5-MoE-A2.7B-Chat": PPTestSettings.fast(),
     "stabilityai/stablelm-3b-4e1t": PPTestSettings.fast(),
     "bigcode/starcoder2-3b": PPTestSettings.fast(),
diff --git a/tests/models/decoder_only/language/test_models.py b/tests/models/decoder_only/language/test_models.py
index 38f31b09318..c784d8f6166 100644
--- a/tests/models/decoder_only/language/test_models.py
+++ b/tests/models/decoder_only/language/test_models.py
@@ -72,7 +72,7 @@
             marks=[pytest.mark.core_model],
         ),
         pytest.param(
-            "Qwen/Qwen-7B",  # qwen (text-only)
+            "Qwen/Qwen-7B-Chat",  # qwen (text-only)
         ),
         pytest.param(
             "Qwen/Qwen2.5-0.5B-Instruct",  # qwen2
diff --git a/tests/models/registry.py b/tests/models/registry.py
index ce9b48febf7..9596d4be23c 100644
--- a/tests/models/registry.py
+++ b/tests/models/registry.py
@@ -121,7 +121,8 @@ def check_available_online(
     "BaichuanForCausalLM": _HfExamplesInfo("baichuan-inc/Baichuan2-7B-chat",
                                            trust_remote_code=True),
     "BambaForCausalLM": _HfExamplesInfo("ibm-ai-platform/Bamba-9B"),
-    "BloomForCausalLM": _HfExamplesInfo("bigscience/bloomz-1b1"),
+    "BloomForCausalLM": _HfExamplesInfo("bigscience/bloom-560m",
+                                        {"1b": "bigscience/bloomz-1b1"}),
     "ChatGLMModel": _HfExamplesInfo("THUDM/chatglm3-6b",
                                     trust_remote_code=True,
                                     max_transformers_version="4.48"),
@@ -151,10 +152,14 @@ def check_available_online(
         is_available_online=False,
         min_transformers_version="4.52.dev0"
     ),
-    "GPT2LMHeadModel": _HfExamplesInfo("gpt2"),
-    "GPTBigCodeForCausalLM": _HfExamplesInfo("bigcode/starcoder"),
-    "GPTJForCausalLM": _HfExamplesInfo("EleutherAI/gpt-j-6b"),
-    "GPTNeoXForCausalLM": _HfExamplesInfo("EleutherAI/pythia-160m"),
+    "GPT2LMHeadModel": _HfExamplesInfo("openai-community/gpt2",
+                                       {"alias": "gpt2"}),
+    "GPTBigCodeForCausalLM": _HfExamplesInfo("bigcode/starcoder",
+                                             {"tiny": "bigcode/tiny_starcoder_py"}),  # noqa: E501
+    "GPTJForCausalLM": _HfExamplesInfo("Milos/slovak-gpt-j-405M",
+                                       {"6b": "EleutherAI/gpt-j-6b"}),
+    "GPTNeoXForCausalLM": _HfExamplesInfo("EleutherAI/pythia-70m",
+                                          {"1b": "EleutherAI/pythia-1.4b"}),
     "GraniteForCausalLM": _HfExamplesInfo("ibm/PowerLM-3b"),
     "GraniteMoeForCausalLM": _HfExamplesInfo("ibm/PowerMoE-3b"),
     "GraniteMoeSharedForCausalLM": _HfExamplesInfo("ibm-research/moe-7b-1b-active-shared-experts"),  # noqa: E501
@@ -185,7 +190,8 @@ def check_available_online(
     "MiniMaxText01ForCausalLM": _HfExamplesInfo("MiniMaxAI/MiniMax-Text-01",
                                                 trust_remote_code=True),
     "MistralForCausalLM": _HfExamplesInfo("mistralai/Mistral-7B-Instruct-v0.1"),
-    "MixtralForCausalLM": _HfExamplesInfo("mistralai/Mixtral-8x7B-Instruct-v0.1"),  # noqa: E501
+    "MixtralForCausalLM": _HfExamplesInfo("mistralai/Mixtral-8x7B-Instruct-v0.1",  # noqa: E501
+                                          {"falcon3": "ehristoforu/Falcon3-MoE-2x7B-Insruct"}),  # noqa: E501
     "QuantMixtralForCausalLM": _HfExamplesInfo("mistral-community/Mixtral-8x22B-v0.1-AWQ"),  # noqa: E501
     "MptForCausalLM": _HfExamplesInfo("mpt", is_available_online=False),
     "MPTForCausalLM": _HfExamplesInfo("mosaicml/mpt-7b"),
@@ -193,7 +199,8 @@ def check_available_online(
     "OlmoForCausalLM": _HfExamplesInfo("allenai/OLMo-1B-hf"),
     "Olmo2ForCausalLM": _HfExamplesInfo("shanearora/OLMo-7B-1124-hf"),
     "OlmoeForCausalLM": _HfExamplesInfo("allenai/OLMoE-1B-7B-0924-Instruct"),
-    "OPTForCausalLM": _HfExamplesInfo("facebook/opt-iml-max-1.3b"),
+    "OPTForCausalLM": _HfExamplesInfo("facebook/opt-125m",
+                                      {"1b": "facebook/opt-iml-max-1.3b"}),
     "OrionForCausalLM": _HfExamplesInfo("OrionStarAI/Orion-14B-Chat",
                                         trust_remote_code=True),
     "PersimmonForCausalLM": _HfExamplesInfo("adept/persimmon-8b-chat"),
@@ -207,8 +214,8 @@ def check_available_online(
                                         trust_remote_code=True),
     "QWenLMHeadModel": _HfExamplesInfo("Qwen/Qwen-7B-Chat",
                                        trust_remote_code=True),
-    "Qwen2ForCausalLM": _HfExamplesInfo("Qwen/Qwen2-7B-Instruct",
-                                        extras={"2.5": "Qwen/Qwen2.5-7B-Instruct"}),  # noqa: E501
+    "Qwen2ForCausalLM": _HfExamplesInfo("Qwen/Qwen2.5-0.5B-Instruct",
+                                        extras={"2.5": "Qwen/Qwen2.5-1.5B-Instruct"}),  # noqa: E501
     "Qwen2MoeForCausalLM": _HfExamplesInfo("Qwen/Qwen1.5-MoE-A2.7B-Chat"),
     "Qwen3ForCausalLM": _HfExamplesInfo(
         "Qwen/Qwen3-8B",

From 2e41196e8de39177f2e81e73ea0ef1ef1b79d2b9 Mon Sep 17 00:00:00 2001
From: DarkLight1337
Date: Sat, 26 Apr 2025 04:56:33 +0000
Subject: [PATCH 3/5] Fix

Signed-off-by: DarkLight1337
---
 docs/source/models/supported_models.md | 2 +-
 tests/models/registry.py               | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/source/models/supported_models.md b/docs/source/models/supported_models.md
index 20a706a0b8e..7bc5dcc1778 100644
--- a/docs/source/models/supported_models.md
+++ b/docs/source/models/supported_models.md
@@ -322,7 +322,7 @@ See [this page](#generative-models) for more information on how to use generativ
   * ✅︎
 - * `GemmaForCausalLM`
   * Gemma
-  * `google/gemma-2b`, `google/gemma-7b`, etc.
+  * `google/gemma-2b`, `google/gemma-1.1-2b-it`, etc.
   * ✅︎
   * ✅︎
 - * `Gemma2ForCausalLM`
diff --git a/tests/models/registry.py b/tests/models/registry.py
index 9596d4be23c..04ecbf23d0f 100644
--- a/tests/models/registry.py
+++ b/tests/models/registry.py
@@ -143,7 +143,7 @@ def check_available_online(
     "ExaoneForCausalLM": _HfExamplesInfo("LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct"),  # noqa: E501
     "Fairseq2LlamaForCausalLM": _HfExamplesInfo("mgleize/fairseq2-dummy-Llama-3.2-1B"),  # noqa: E501
     "FalconForCausalLM": _HfExamplesInfo("tiiuae/falcon-7b"),
-    "GemmaForCausalLM": _HfExamplesInfo("google/gemma-2b"),
+    "GemmaForCausalLM": _HfExamplesInfo("google/gemma-1.1-2b-it"),
     "Gemma2ForCausalLM": _HfExamplesInfo("google/gemma-2-9b"),
     "Gemma3ForCausalLM": _HfExamplesInfo("google/gemma-3-1b-it"),
     "GlmForCausalLM": _HfExamplesInfo("THUDM/glm-4-9b-chat-hf"),

From 90d194d66aeb39cdef1040ab2f6c3cc61cfa31ef Mon Sep 17 00:00:00 2001
From: DarkLight1337
Date: Sat, 26 Apr 2025 05:30:20 +0000
Subject: [PATCH 4/5] Address comments

Signed-off-by: DarkLight1337
---
 tests/distributed/test_pipeline_parallel.py | 2 +-
 tests/models/registry.py                    | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/distributed/test_pipeline_parallel.py b/tests/distributed/test_pipeline_parallel.py
index 1807fab7c0c..03de8d9b92b 100644
--- a/tests/distributed/test_pipeline_parallel.py
+++ b/tests/distributed/test_pipeline_parallel.py
@@ -195,7 +195,7 @@ def iter_params(self, model_id: str):
     "microsoft/Phi-3-small-8k-instruct": PPTestSettings.fast(),
     "microsoft/Phi-3.5-MoE-instruct": PPTestSettings.detailed(multi_node_only=True, load_format="dummy"),  # noqa: E501
     "Qwen/Qwen-7B-Chat": PPTestSettings.fast(),
-    "Qwen/Qwen2.5-1.5B-Instruct": PPTestSettings.fast(),
+    "Qwen/Qwen2.5-0.5B-Instruct": PPTestSettings.fast(),
     "Qwen/Qwen1.5-MoE-A2.7B-Chat": PPTestSettings.fast(),
     "stabilityai/stablelm-3b-4e1t": PPTestSettings.fast(),
     "bigcode/starcoder2-3b": PPTestSettings.fast(),
diff --git a/tests/models/registry.py b/tests/models/registry.py
index 04ecbf23d0f..d29c8ce633c 100644
--- a/tests/models/registry.py
+++ b/tests/models/registry.py
@@ -214,8 +214,8 @@ def check_available_online(
                                         trust_remote_code=True),
     "QWenLMHeadModel": _HfExamplesInfo("Qwen/Qwen-7B-Chat",
                                        trust_remote_code=True),
-    "Qwen2ForCausalLM": _HfExamplesInfo("Qwen/Qwen2.5-0.5B-Instruct",
-                                        extras={"2.5": "Qwen/Qwen2.5-1.5B-Instruct"}),  # noqa: E501
+    "Qwen2ForCausalLM": _HfExamplesInfo("Qwen/Qwen2-0.5B-Instruct",
+                                        extras={"2.5": "Qwen/Qwen2.5-0.5B-Instruct"}),  # noqa: E501
     "Qwen2MoeForCausalLM": _HfExamplesInfo("Qwen/Qwen1.5-MoE-A2.7B-Chat"),
     "Qwen3ForCausalLM": _HfExamplesInfo(
         "Qwen/Qwen3-8B",

From 9757326e2e37cd1d5a0fe8a91be2c67d3de19ad0 Mon Sep 17 00:00:00 2001
From: DarkLight1337
Date: Sat, 26 Apr 2025 09:19:58 +0000
Subject: [PATCH 5/5] Fix

Signed-off-by: DarkLight1337
---
 .../decoder_only/language/test_models.py      | 24 ++++++++++++-------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/tests/models/decoder_only/language/test_models.py b/tests/models/decoder_only/language/test_models.py
index c784d8f6166..d35d87459cd 100644
--- a/tests/models/decoder_only/language/test_models.py
+++ b/tests/models/decoder_only/language/test_models.py
@@ -9,6 +9,7 @@
 
 from vllm.platforms import current_platform
 
+from ....utils import large_gpu_mark
 from ...registry import HF_EXAMPLE_MODELS
 from ...utils import check_logprobs_close
 
@@ -26,7 +27,7 @@
 AITER_MODEL_LIST = [
     "meta-llama/Llama-3.2-1B-Instruct",
     "openbmb/MiniCPM3-4B",
-    "Qwen/Qwen-7B",
+    "Qwen/Qwen-7B-Chat",
     "Qwen/Qwen2.5-0.5B-Instruct",
     "ehristoforu/Falcon3-MoE-2x7B-Insruct",
 ]
@@ -61,7 +62,8 @@
         pytest.param(
             "openbmb/MiniCPM3-4B",
             # fused_moe not supported on CPU
-            marks=[pytest.mark.core_model],
+            marks=[pytest.mark.core_model,
+                   large_gpu_mark(min_gb=32)],
         ),
         pytest.param(
             "facebook/opt-125m",  # opt
@@ -82,17 +84,17 @@
         pytest.param("bigcode/starcoder2-3b"),  # starcoder2
         pytest.param(
             "ehristoforu/Falcon3-MoE-2x7B-Insruct",  # mixtral
-            marks=[pytest.mark.cpu_model],
+            marks=[pytest.mark.cpu_model,
+                   large_gpu_mark(min_gb=48)],
         )
     ])
-@pytest.mark.parametrize("dtype", ["half"])
 @pytest.mark.parametrize("max_tokens", [32])
 @pytest.mark.parametrize("num_logprobs", [5])
 @pytest.mark.parametrize(
     "use_rocm_aiter", [True, False] if current_platform.is_rocm() else [False])
 def test_models(hf_runner, vllm_runner, example_prompts, model: str,
-                dtype: str, max_tokens: int, num_logprobs: int,
-                use_rocm_aiter: bool, monkeypatch) -> None:
+                max_tokens: int, num_logprobs: int, use_rocm_aiter: bool,
+                monkeypatch) -> None:
 
     model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
     model_info.check_available_online(on_fail="skip")
@@ -110,11 +112,17 @@ def test_models(hf_runner, vllm_runner, example_prompts, model: str,
         # in parts of the operators
         pytest.skip(f"Skipping '{model}' model test with AITER kernel.")
 
-    with hf_runner(model, dtype=dtype) as hf_model:
+    with hf_runner(model) as hf_model:
         hf_outputs = hf_model.generate_greedy_logprobs_limit(
             example_prompts, max_tokens, num_logprobs)
 
-    with vllm_runner(model, dtype=dtype) as vllm_model:
+    with vllm_runner(
+            model,
+            tokenizer_name=model_info.tokenizer or model,
+            tokenizer_mode=model_info.tokenizer_mode,
+            trust_remote_code=model_info.trust_remote_code,
+            max_num_seqs=2,
+    ) as vllm_model:
         vllm_outputs = vllm_model.generate_greedy_logprobs(
             example_prompts, max_tokens, num_logprobs)