
Commit 5a624fc

DarkLight1337 authored and mzusman committed
[VLM] Reorganize profiling/processing-related code (vllm-project#11812)
Signed-off-by: DarkLight1337 <[email protected]>
1 parent 9f744c0 commit 5a624fc

File tree

23 files changed: +833 -760 lines changed
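Across the test files shown below, the per-model processor fixtures and the hand-built InputProcessingContext are replaced by a single registry call. A minimal sketch of the new setup, assuming the existing build_model_context test helper and the llava-next model ID used in these tests (any of the parametrized model IDs would work the same way):

# Sketch of the setup shared by the migrated tests; model ID and image
# limit are example values taken from the llava-next test below.
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.utils import cached_get_tokenizer

from ....utils import build_model_context  # existing test-suite helper

ctx = build_model_context(
    model_name="llava-hf/llava-v1.6-mistral-7b-hf",
    tokenizer_name="llava-hf/llava-v1.6-mistral-7b-hf",
    mm_processor_kwargs=None,
    limit_mm_per_prompt={"image": 1},
)

# The registry resolves the model's multimodal processor from the model
# config, so the tests no longer need per-model lazy-import fixtures or
# a manually constructed InputProcessingContext.
processor = MULTIMODAL_REGISTRY.create_processor(
    ctx.model_config,
    tokenizer=cached_get_tokenizer(ctx.model_config.tokenizer),
)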

tests/models/decoder_only/vision_language/processing/test_llava_next.py

Lines changed: 14 additions & 27 deletions
@@ -4,24 +4,17 @@
 import pytest
 from PIL import Image
 from pqdm.threads import pqdm
-from transformers import AutoTokenizer

-from vllm.inputs import InputProcessingContext
+from vllm.multimodal import MULTIMODAL_REGISTRY
 from vllm.multimodal.parse import ImageSize
+from vllm.multimodal.processing import BaseMultiModalProcessor
+from vllm.multimodal.utils import cached_get_tokenizer

 from ....utils import build_model_context


-# Fixtures lazy import to avoid initializing CUDA during test collection
-@pytest.fixture()
-def processor_for_llava_next():
-    from vllm.model_executor.models.llava_next import (
-        LlavaNextMultiModalProcessor)
-    return LlavaNextMultiModalProcessor
-
-
 def _validate_image_prompt_replacements_one(
-    processor,
+    processor: BaseMultiModalProcessor,
     num_imgs: int,
     failed_size_excs: list[tuple[ImageSize, Exception]],
     image_size: ImageSize,
@@ -78,20 +71,17 @@ def _test_image_prompt_replacements(

 @pytest.mark.parametrize("model_id", ["llava-hf/llava-v1.6-mistral-7b-hf"])
 @pytest.mark.parametrize("num_imgs", [1, 2])
-def test_processor_prompt_replacements_regression(
-    processor_for_llava_next,
-    model_id: str,
-    num_imgs: int,
-):
+def test_processor_prompt_replacements_regression(model_id, num_imgs):
     ctx = build_model_context(
         model_name=model_id,
         tokenizer_name=model_id,
         mm_processor_kwargs=None,
         limit_mm_per_prompt={"image": num_imgs},
     )
-    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-    ctx = InputProcessingContext(ctx.model_config, tokenizer)
-    processor = processor_for_llava_next(ctx)
+    processor = MULTIMODAL_REGISTRY.create_processor(
+        ctx.model_config,
+        tokenizer=cached_get_tokenizer(ctx.model_config.tokenizer),
+    )

     image_ratios = [(171, 152), (184, 161), (198, 176), (333, 296), (369, 328),
                     (488, 183), (2560, 1669)]
@@ -111,20 +101,17 @@ def test_processor_prompt_replacements_regression(
                     "Comment this out to run it manually.")
 @pytest.mark.parametrize("model_id", ["llava-hf/llava-v1.6-mistral-7b-hf"])
 @pytest.mark.parametrize("num_imgs", [1])
-def test_processor_prompt_replacements_all(
-    processor_for_llava_next,
-    model_id: str,
-    num_imgs: int,
-):
+def test_processor_prompt_replacements_all(model_id, num_imgs):
     ctx = build_model_context(
         model_name=model_id,
         tokenizer_name=model_id,
         mm_processor_kwargs=None,
         limit_mm_per_prompt={"image": num_imgs},
     )
-    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-    ctx = InputProcessingContext(ctx.model_config, tokenizer)
-    processor = processor_for_llava_next(ctx)
+    processor = MULTIMODAL_REGISTRY.create_processor(
+        ctx.model_config,
+        tokenizer=cached_get_tokenizer(ctx.model_config.tokenizer),
+    )

     seen_aspect_ratios = set[float]()
     image_sizes = list[ImageSize]()

tests/models/decoder_only/vision_language/processing/test_llava_onevision.py

Lines changed: 14 additions & 27 deletions
@@ -4,24 +4,17 @@
 import pytest
 from PIL import Image
 from pqdm.threads import pqdm
-from transformers import AutoTokenizer

-from vllm.inputs import InputProcessingContext
+from vllm.multimodal import MULTIMODAL_REGISTRY
 from vllm.multimodal.parse import ImageSize
+from vllm.multimodal.processing import BaseMultiModalProcessor
+from vllm.multimodal.utils import cached_get_tokenizer

 from ....utils import build_model_context


-# Fixtures lazy import to avoid initializing CUDA during test collection
-@pytest.fixture()
-def processor_for_llava_onevision():
-    from vllm.model_executor.models.llava_onevision import (
-        LlavaOnevisionMultiModalProcessor)
-    return LlavaOnevisionMultiModalProcessor
-
-
 def _validate_image_prompt_replacements_one(
-    processor,
+    processor: BaseMultiModalProcessor,
     num_imgs: int,
     failed_size_excs: list[tuple[ImageSize, Exception]],
     image_size: ImageSize,
@@ -77,20 +70,17 @@ def _test_image_prompt_replacements(
 @pytest.mark.parametrize("model_id",
                          ["llava-hf/llava-onevision-qwen2-0.5b-ov-hf"])
 @pytest.mark.parametrize("num_imgs", [1, 2])
-def test_processor_prompt_replacements_regression(
-    processor_for_llava_onevision,
-    model_id: str,
-    num_imgs: int,
-):
+def test_processor_prompt_replacements_regression(model_id, num_imgs):
     ctx = build_model_context(
         model_name=model_id,
         tokenizer_name=model_id,
         mm_processor_kwargs=None,
         limit_mm_per_prompt={"image": num_imgs},
     )
-    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-    ctx = InputProcessingContext(ctx.model_config, tokenizer)
-    processor = processor_for_llava_onevision(ctx)
+    processor = MULTIMODAL_REGISTRY.create_processor(
+        ctx.model_config,
+        tokenizer=cached_get_tokenizer(ctx.model_config.tokenizer),
+    )

     image_ratios = [(171, 152), (184, 161), (198, 176), (333, 296), (369, 328),
                     (488, 183), (2560, 1669)]
@@ -111,20 +101,17 @@ def test_processor_prompt_replacements_regression(
 @pytest.mark.parametrize("model_id",
                          ["llava-hf/llava-onevision-qwen2-0.5b-ov-hf"])
 @pytest.mark.parametrize("num_imgs", [1])
-def test_processor_prompt_replacements_all(
-    processor_for_llava_onevision,
-    model_id: str,
-    num_imgs: int,
-):
+def test_processor_prompt_replacements_all(model_id, num_imgs):
     ctx = build_model_context(
         model_name=model_id,
         tokenizer_name=model_id,
         mm_processor_kwargs=None,
         limit_mm_per_prompt={"image": num_imgs},
     )
-    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-    ctx = InputProcessingContext(ctx.model_config, tokenizer)
-    processor = processor_for_llava_onevision(ctx)
+    processor = MULTIMODAL_REGISTRY.create_processor(
+        ctx.model_config,
+        tokenizer=cached_get_tokenizer(ctx.model_config.tokenizer),
+    )

     seen_aspect_ratios = set[float]()
     image_sizes = list[ImageSize]()

tests/models/decoder_only/vision_language/processing/test_phi3v.py

Lines changed: 10 additions & 14 deletions
@@ -1,21 +1,13 @@
 """Tests for phi3v's multimodal preprocessing kwargs."""
 import pytest
-from transformers import AutoTokenizer

-from vllm.inputs import InputProcessingContext
-from vllm.model_executor.models.phi3v import _IMAGE_TOKEN_ID
+from vllm.multimodal import MULTIMODAL_REGISTRY
+from vllm.multimodal.utils import cached_get_tokenizer

 from .....conftest import _ImageAssets
 from ....utils import build_model_context


-# Wrap lazy imports to avoid initializing CUDA during test collection
-@pytest.fixture()
-def processor_for_phi3v():
-    from vllm.model_executor.models.phi3v import Phi3VMultiModalProcessor
-    return Phi3VMultiModalProcessor
-
-
 @pytest.mark.parametrize("model_id", ["microsoft/Phi-3.5-vision-instruct"])
 # yapf: disable
 @pytest.mark.parametrize(
@@ -29,29 +21,33 @@ def processor_for_phi3v():
 # yapf: enable
 @pytest.mark.parametrize("num_imgs", [1, 2])
 def test_processor_override(
-    processor_for_phi3v,
     image_assets: _ImageAssets,
     model_id: str,
     mm_processor_kwargs: dict[str, int],
     expected_toks_per_img: int,
     num_imgs: int,
 ):
     """Ensure input_processor_for_phi3v handles num_crops properly."""
+    # Avoid initializing CUDA early
+    from vllm.model_executor.models.phi3v import _IMAGE_TOKEN_ID
+
     ctx = build_model_context(
         model_name=model_id,
         tokenizer_name=model_id,
         trust_remote_code=True,
         limit_mm_per_prompt={"image": num_imgs},
     )
-    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-    ctx = InputProcessingContext(ctx.model_config, tokenizer)
+    tokenizer = cached_get_tokenizer(ctx.model_config.tokenizer)
+    processor = MULTIMODAL_REGISTRY.create_processor(
+        ctx.model_config,
+        tokenizer=tokenizer,
+    )

     # Build the image str / prompt based on the number of images we pass
     img_str = "".join([f"<|image_{idx}|>\n" for idx in range(1, num_imgs + 1)])
     prompt = f"<|user|>\n{img_str}<|end|>\n<|assistant|>\n"
     mm_data = {"image": [image_assets[0].pil_image] * num_imgs}

-    processor = processor_for_phi3v(ctx)
     processed_inputs = processor.apply(prompt, mm_data, mm_processor_kwargs)

     # Ensure we have the right number of placeholders per num_crops size
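A side note on this file: the _IMAGE_TOKEN_ID constant is now imported inside the test body (with the "Avoid initializing CUDA early" comment) rather than at module scope, which serves the same purpose as the deleted lazy fixture: pytest can collect the file without importing the phi3v model module. A minimal, self-contained sketch of that pattern; the trivial assertion is only illustrative:

def test_image_token_id_is_importable():
    # Deferred import: pulling in vllm.model_executor.models.phi3v at
    # module scope could initialize CUDA while pytest is still collecting
    # tests, so the constant is imported inside the test body instead.
    from vllm.model_executor.models.phi3v import _IMAGE_TOKEN_ID

    # Illustrative check only; the real test uses the constant to count
    # image placeholders in the processed prompt.
    assert isinstance(_IMAGE_TOKEN_ID, int)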

tests/models/decoder_only/vision_language/processing/test_qwen2_vl.py

Lines changed: 8 additions & 14 deletions
@@ -1,19 +1,12 @@
 import pytest
-from transformers import AutoTokenizer

-from vllm.inputs import InputProcessingContext
+from vllm.multimodal import MULTIMODAL_REGISTRY
+from vllm.multimodal.utils import cached_get_tokenizer

 from .....conftest import _ImageAssets
 from ....utils import build_model_context


-# Fixtures lazy import to avoid initializing CUDA during test collection
-@pytest.fixture()
-def processor_for_qwen2_vl():
-    from vllm.model_executor.models.qwen2_vl import Qwen2VLMultiModalProcessor
-    return Qwen2VLMultiModalProcessor
-
-
 @pytest.mark.parametrize("model_id", ["Qwen/Qwen2-VL-2B-Instruct"])
 # yapf: disable
 @pytest.mark.parametrize(
@@ -24,7 +17,6 @@ def processor_for_qwen2_vl():
 # yapf: enable
 @pytest.mark.parametrize("num_imgs", [1, 2])
 def test_processor_override(
-    processor_for_qwen2_vl,
     image_assets: _ImageAssets,
     model_id: str,
     mm_processor_kwargs: dict[str, object],
@@ -39,18 +31,20 @@ def test_processor_override(
         mm_processor_kwargs=None,
         limit_mm_per_prompt={"image": num_imgs},
     )
-    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-    ctx = InputProcessingContext(ctx.model_config, tokenizer)
+    tokenizer = cached_get_tokenizer(ctx.model_config.tokenizer)
+    processor = MULTIMODAL_REGISTRY.create_processor(
+        ctx.model_config,
+        tokenizer=tokenizer,
+    )

     # Build the image str / prompt based on the number of images we pass
     prompt = "<|vision_start|><|image_pad|><|vision_end|>" * num_imgs
     mm_data = {"image": [image_assets[0].pil_image] * num_imgs}

-    processor = processor_for_qwen2_vl(ctx)
     processed_inputs = processor.apply(prompt, mm_data, mm_processor_kwargs)

     # Ensure we have the right number of placeholders per num_crops size
-    hf_processor = processor._get_hf_processor(**mm_processor_kwargs)
+    hf_processor = processor.info.get_hf_processor(**mm_processor_kwargs)
     image_token_id = tokenizer.convert_tokens_to_ids(hf_processor.image_token)
     img_tok_count = processed_inputs["prompt_token_ids"].count(image_token_id)
     pixel_shape = processed_inputs["mm_kwargs"]["pixel_values"].shape
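Besides the registry migration, the last hunk also reflects the reorganization in the processor API itself: the HF processor is now reached through the processor's info object (processor.info.get_hf_processor(...)) instead of the old private _get_hf_processor method. A minimal sketch of the placeholder-count check in its new form, assuming a dummy PIL image stands in for the test suite's image_assets fixture and that empty mm_processor_kwargs (no overrides) is acceptable here:

from PIL import Image

from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.utils import cached_get_tokenizer

from ....utils import build_model_context  # existing test-suite helper

model_id = "Qwen/Qwen2-VL-2B-Instruct"
num_imgs = 1
mm_processor_kwargs: dict[str, object] = {}  # example: no processor overrides

ctx = build_model_context(
    model_name=model_id,
    tokenizer_name=model_id,
    mm_processor_kwargs=None,
    limit_mm_per_prompt={"image": num_imgs},
)
tokenizer = cached_get_tokenizer(ctx.model_config.tokenizer)
processor = MULTIMODAL_REGISTRY.create_processor(
    ctx.model_config,
    tokenizer=tokenizer,
)

prompt = "<|vision_start|><|image_pad|><|vision_end|>" * num_imgs
# Dummy image in place of the image_assets fixture used by the real test.
mm_data = {"image": [Image.new("RGB", (336, 336))] * num_imgs}

processed_inputs = processor.apply(prompt, mm_data, mm_processor_kwargs)

# New accessor: the HF processor hangs off processor.info rather than the
# removed private _get_hf_processor helper.
hf_processor = processor.info.get_hf_processor(**mm_processor_kwargs)
image_token_id = tokenizer.convert_tokens_to_ids(hf_processor.image_token)

# Count the image placeholder tokens produced by processor.apply().
img_tok_count = processed_inputs["prompt_token_ids"].count(image_token_id)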
