
Commit 90c2547

Fix Phi3V and Qwen2-VL tests
Signed-off-by: DarkLight1337 <[email protected]>
1 parent 1d0fab0 commit 90c2547

File tree

4 files changed: +35 -31 lines changed


tests/models/decoder_only/vision_language/processing/test_phi3v.py

Lines changed: 10 additions & 14 deletions
```diff
@@ -1,21 +1,13 @@
 """Tests for phi3v's multimodal preprocessing kwargs."""
 import pytest
-from transformers import AutoTokenizer
 
-from vllm.inputs import InputProcessingContext
-from vllm.model_executor.models.phi3v import _IMAGE_TOKEN_ID
+from vllm.multimodal import MULTIMODAL_REGISTRY
+from vllm.multimodal.utils import cached_get_tokenizer
 
 from .....conftest import _ImageAssets
 from ....utils import build_model_context
 
 
-# Wrap lazy imports to avoid initializing CUDA during test collection
-@pytest.fixture()
-def processor_for_phi3v():
-    from vllm.model_executor.models.phi3v import Phi3VMultiModalProcessor
-    return Phi3VMultiModalProcessor
-
-
 @pytest.mark.parametrize("model_id", ["microsoft/Phi-3.5-vision-instruct"])
 # yapf: disable
 @pytest.mark.parametrize(
@@ -29,29 +21,33 @@ def processor_for_phi3v():
 # yapf: enable
 @pytest.mark.parametrize("num_imgs", [1, 2])
 def test_processor_override(
-    processor_for_phi3v,
     image_assets: _ImageAssets,
     model_id: str,
    mm_processor_kwargs: dict[str, int],
     expected_toks_per_img: int,
     num_imgs: int,
 ):
     """Ensure input_processor_for_phi3v handles num_crops properly."""
+    # Avoid initializing CUDA early
+    from vllm.model_executor.models.phi3v import _IMAGE_TOKEN_ID
+
     ctx = build_model_context(
         model_name=model_id,
         tokenizer_name=model_id,
         trust_remote_code=True,
         limit_mm_per_prompt={"image": num_imgs},
     )
-    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-    ctx = InputProcessingContext(ctx.model_config, tokenizer)
+    tokenizer = cached_get_tokenizer(ctx.model_config.tokenizer)
+    processor = MULTIMODAL_REGISTRY.create_processor(
+        ctx.model_config,
+        tokenizer=tokenizer,
+    )
 
     # Build the image str / prompt based on the number of images we pass
     img_str = "".join([f"<|image_{idx}|>\n" for idx in range(1, num_imgs + 1)])
     prompt = f"<|user|>\n{img_str}<|end|>\n<|assistant|>\n"
     mm_data = {"image": [image_assets[0].pil_image] * num_imgs}
 
-    processor = processor_for_phi3v(ctx)
     processed_inputs = processor.apply(prompt, mm_data, mm_processor_kwargs)
 
     # Ensure we have the right number of placeholders per num_crops size
```
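Taken together, these changes replace the lazily imported, model-specific fixture with a processor built through the multimodal registry. A minimal sketch of the new construction pattern, assuming a `ctx` produced by the test suite's `build_model_context` helper as in the diff above (the vLLM calls are exactly the ones visible in the diff):

```python
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.utils import cached_get_tokenizer


def make_processor(ctx):
    """Build a multimodal processor the way the updated test does.

    `ctx` is the context returned by the test-suite helper
    `build_model_context(...)` shown in the diff above.
    """
    # Resolve the tokenizer from the model config instead of calling
    # transformers.AutoTokenizer directly, so a cached copy is reused.
    tokenizer = cached_get_tokenizer(ctx.model_config.tokenizer)

    # The registry looks up the model-specific processor (here,
    # Phi3VMultiModalProcessor) from the model config, so the test no
    # longer needs to import it inside a fixture.
    return MULTIMODAL_REGISTRY.create_processor(
        ctx.model_config,
        tokenizer=tokenizer,
    )
```

Because the registry resolves the processor class itself, the `processor_for_phi3v` fixture and its collection-time CUDA workaround become unnecessary; only `_IMAGE_TOKEN_ID` still gets a deferred import inside the test body.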

tests/models/decoder_only/vision_language/processing/test_qwen2_vl.py

Lines changed: 8 additions & 14 deletions
```diff
@@ -1,19 +1,12 @@
 import pytest
-from transformers import AutoTokenizer
 
-from vllm.inputs import InputProcessingContext
+from vllm.multimodal import MULTIMODAL_REGISTRY
+from vllm.multimodal.utils import cached_get_tokenizer
 
 from .....conftest import _ImageAssets
 from ....utils import build_model_context
 
 
-# Fixtures lazy import to avoid initializing CUDA during test collection
-@pytest.fixture()
-def processor_for_qwen2_vl():
-    from vllm.model_executor.models.qwen2_vl import Qwen2VLMultiModalProcessor
-    return Qwen2VLMultiModalProcessor
-
-
 @pytest.mark.parametrize("model_id", ["Qwen/Qwen2-VL-2B-Instruct"])
 # yapf: disable
 @pytest.mark.parametrize(
@@ -24,7 +17,6 @@ def processor_for_qwen2_vl():
 # yapf: enable
 @pytest.mark.parametrize("num_imgs", [1, 2])
 def test_processor_override(
-    processor_for_qwen2_vl,
     image_assets: _ImageAssets,
     model_id: str,
     mm_processor_kwargs: dict[str, object],
@@ -39,18 +31,20 @@ def test_processor_override(
         mm_processor_kwargs=None,
         limit_mm_per_prompt={"image": num_imgs},
     )
-    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-    ctx = InputProcessingContext(ctx.model_config, tokenizer)
+    tokenizer = cached_get_tokenizer(ctx.model_config.tokenizer)
+    processor = MULTIMODAL_REGISTRY.create_processor(
+        ctx.model_config,
+        tokenizer=tokenizer,
+    )
 
     # Build the image str / prompt based on the number of images we pass
     prompt = "<|vision_start|><|image_pad|><|vision_end|>" * num_imgs
     mm_data = {"image": [image_assets[0].pil_image] * num_imgs}
 
-    processor = processor_for_qwen2_vl(ctx)
     processed_inputs = processor.apply(prompt, mm_data, mm_processor_kwargs)
 
     # Ensure we have the right number of placeholders per num_crops size
-    hf_processor = processor._get_hf_processor(**mm_processor_kwargs)
+    hf_processor = processor.info.get_hf_processor(**mm_processor_kwargs)
     image_token_id = tokenizer.convert_tokens_to_ids(hf_processor.image_token)
     img_tok_count = processed_inputs["prompt_token_ids"].count(image_token_id)
     pixel_shape = processed_inputs["mm_kwargs"]["pixel_values"].shape
```
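The assertion side changes too: the HF processor now comes from the public `processor.info.get_hf_processor(...)` accessor rather than the private `processor._get_hf_processor(...)`. A hedged sketch of the placeholder count the test asserts on, using only calls visible in the diff (`count_image_placeholders` is an illustrative helper name, not part of the test suite):

```python
def count_image_placeholders(processor, tokenizer, prompt, mm_data,
                             mm_processor_kwargs):
    """Count image placeholder tokens, mirroring the updated test."""
    processed_inputs = processor.apply(prompt, mm_data, mm_processor_kwargs)

    # Public accessor on the processor's info object replaces the old
    # private processor._get_hf_processor(...).
    hf_processor = processor.info.get_hf_processor(**mm_processor_kwargs)
    image_token_id = tokenizer.convert_tokens_to_ids(hf_processor.image_token)

    return processed_inputs["prompt_token_ids"].count(image_token_id)


# The test then checks something like:
#   assert count == expected_toks_per_img * num_imgs
```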

vllm/model_executor/models/phi3v.py

Lines changed: 5 additions & 1 deletion
```diff
@@ -322,6 +322,7 @@ def get_mm_max_tokens_per_item(self, seq_len: int) -> Mapping[str, int]:
         max_image_tokens = self.get_num_image_tokens(
             image_width=target_width,
             image_height=target_height,
+            processor=None,
         )
 
         return {"image": max_image_tokens}
@@ -331,8 +332,10 @@ def get_num_image_tokens(
         *,
         image_width: int,
         image_height: int,
+        processor: Optional[ProcessorMixin],
     ) -> int:
-        processor = self.get_hf_processor()
+        if processor is None:
+            processor = self.get_hf_processor()
 
         return processor.calc_num_image_tokens_from_image_size(  # type: ignore
             width=image_width,
@@ -431,6 +434,7 @@ def get_replacement_phi3v(item_idx: int):
            num_image_tokens = self.info.get_num_image_tokens(
                image_width=image_size.width,
                image_height=image_size.height,
+               processor=hf_processor,
            )
 
            return [_IMAGE_TOKEN_ID] * num_image_tokens + [bos_token_id]
```
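The new `processor` argument follows a pass-through pattern: a caller that already holds an HF processor (such as `get_replacement_phi3v`, which passes `hf_processor`) supplies it, while callers without one pass `None` and let the method build it. A self-contained sketch of the same pattern, with stand-in classes (not vLLM's real types) and a toy token formula:

```python
from typing import Optional


class FakeHfProcessor:
    """Stand-in for the HF-side processor (a ProcessorMixin in vLLM)."""

    def calc_num_image_tokens_from_image_size(self, *, width: int,
                                              height: int) -> int:
        # Toy formula for illustration only, not Phi-3.5-Vision's real count.
        return (width // 336 + 1) * (height // 336 + 1) * 16


class ProcessingInfo:
    def get_hf_processor(self) -> FakeHfProcessor:
        # Potentially expensive, so callers that already hold one pass it in.
        return FakeHfProcessor()

    def get_num_image_tokens(
        self,
        *,
        image_width: int,
        image_height: int,
        processor: Optional[FakeHfProcessor],
    ) -> int:
        # Construct a processor only when the caller did not supply one.
        if processor is None:
            processor = self.get_hf_processor()
        return processor.calc_num_image_tokens_from_image_size(
            width=image_width, height=image_height)
```

In the commit itself, `get_mm_max_tokens_per_item` takes the `processor=None` path, while the prompt-replacement callback reuses the `hf_processor` it has already resolved.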

vllm/model_executor/models/qwen2_vl.py

Lines changed: 12 additions & 2 deletions
```diff
@@ -763,15 +763,17 @@ def _get_vision_info(
         image_height: int,
         num_frames: int = 1,
         do_resize: bool = True,
+        image_processor: Optional[Qwen2VLImageProcessor],
     ) -> tuple[ImageSize, int]:
+        if image_processor is None:
+            image_processor = self.get_image_processor()
+
         hf_config = self.get_hf_config()
         vision_config = hf_config.vision_config
         patch_size = vision_config.patch_size
         merge_size = vision_config.spatial_merge_size
         temporal_patch_size = vision_config.temporal_patch_size
 
-        image_processor = self.get_image_processor()
-
         if do_resize:
             resized_height, resized_width = smart_resize(
                 height=image_height,
@@ -800,10 +802,12 @@ def get_num_image_tokens(
         *,
         image_width: int,
         image_height: int,
+        image_processor: Optional[Qwen2VLImageProcessor],
     ) -> int:
         _, num_image_tokens = self._get_vision_info(
             image_width=image_width,
             image_height=image_height,
+            image_processor=image_processor,
         )
         return num_image_tokens
 
@@ -813,18 +817,21 @@ def get_num_video_tokens(
         image_width: int,
         image_height: int,
         num_frames: int,
+        image_processor: Optional[Qwen2VLImageProcessor],
     ) -> int:
         _, num_video_tokens = self._get_vision_info(
             image_width=image_width,
             image_height=image_height,
             num_frames=num_frames,
+            image_processor=image_processor,
         )
         return num_video_tokens
 
     def get_image_size_with_most_features(self) -> ImageSize:
         max_image_size, _ = self._get_vision_info(
             image_width=9999999,
             image_height=9999999,
+            image_processor=None,
         )
         return max_image_size
 
@@ -834,6 +841,7 @@ def get_max_image_tokens(self) -> int:
         return self.get_num_image_tokens(
             image_width=target_width,
             image_height=target_height,
+            image_processor=None,
         )
 
     def _get_max_video_frames(self, max_tokens: int) -> int:
@@ -847,6 +855,7 @@ def _get_max_video_frames(self, max_tokens: int) -> int:
                 image_width=target_width,
                 image_height=target_height,
                 num_frames=next_num_frames,
+                image_processor=None,
             )
 
             if next_max_tokens > max_tokens:
@@ -880,6 +889,7 @@ def get_max_video_tokens(self, seq_len: int) -> int:
             image_width=target_width,
             image_height=target_height,
             num_frames=self.get_num_frames_with_most_features(seq_len),
+            image_processor=None,
         )
 
```
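Qwen2-VL threads the same idea through `_get_vision_info`: each entry point now states explicitly whether it reuses an image processor or requests a fresh one by passing `None`. A hedged usage sketch, where `info` stands for the object exposing these methods and the probe sizes are purely illustrative:

```python
# Resolve the image processor once and reuse it across several probes,
# instead of letting _get_vision_info re-resolve it on every call.
image_processor = info.get_image_processor()

token_counts = [
    info.get_num_image_tokens(
        image_width=width,
        image_height=height,
        image_processor=image_processor,  # pass None to have it rebuilt
    )
    for width, height in [(336, 336), (672, 672), (1344, 1344)]
]
```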
