
Commit 966de49

mgoin authored and mzusman committed
Various cosmetic/comment fixes (vllm-project#12089)
Signed-off-by: mgoin <[email protected]>
1 parent ed89575 commit 966de49

File tree

14 files changed: +16 −29 lines changed


vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@ def create_weights(self, layer: torch.nn.Module, input_size: int,
 
         if not sparse_cutlass_supported():
             raise ValueError(
-                "Sparse CUTLASS not supported. vLLM must be built with"
+                "Sparse CUTLASS not supported. vLLM must be built with "
                 "CUDA 12.2 or later to use this feature")
 
         self.output_dtype = params_dtype
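
The one-character fix above matters because Python joins adjacent string literals with no separator, so a missing trailing space silently fuses two words across the line break. A minimal standalone sketch of the pitfall (illustrative strings, not vLLM code):

    # Adjacent string literals are concatenated at compile time,
    # with no separator inserted between them.
    msg_buggy = ("Sparse CUTLASS not supported. vLLM must be built with"
                 "CUDA 12.2 or later to use this feature")
    msg_fixed = ("Sparse CUTLASS not supported. vLLM must be built with "
                 "CUDA 12.2 or later to use this feature")

    print(msg_buggy)  # "...must be built withCUDA 12.2..." (words fused)
    print(msg_fixed)  # "...must be built with CUDA 12.2..." (correct)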

vllm/model_executor/models/aria.py

Lines changed: 1 addition & 2 deletions
@@ -390,8 +390,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)
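
The comment rewritten in this and each of the following load_weights hunks sits under the same condition, which uses an assignment expression (the := walrus operator) to test for and capture a remapped kv-cache scale name in one step. A simplified sketch of that pattern, with a hypothetical get_cache_scale and a toy parameter store (not vLLM's actual API):

    from typing import Optional

    def get_cache_scale(name: str) -> Optional[str]:
        # Hypothetical stand-in: map a checkpoint tensor name to the
        # parameter holding its kv-cache quantization scale, else None.
        if name.endswith(".k_proj.output_scale"):
            return name.replace(".k_proj.output_scale", ".attn.k_scale")
        return None

    params_dict = {"model.layers.0.attn.k_scale": 0.0}

    name = "model.layers.0.k_proj.output_scale"
    if (scale_name := get_cache_scale(name)):
        # Loading kv cache quantization scales
        params_dict[scale_name] = 0.5  # vLLM calls weight_loader(param, ...) here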

vllm/model_executor/models/commandr.py

Lines changed: 1 addition & 2 deletions
@@ -440,8 +440,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
 
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

vllm/model_executor/models/dbrx.py

Lines changed: 1 addition & 2 deletions
@@ -452,8 +452,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
         for name, loaded_weight in weights:
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

vllm/model_executor/models/exaone.py

Lines changed: 1 addition & 2 deletions
@@ -533,8 +533,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

vllm/model_executor/models/gpt_j.py

Lines changed: 1 addition & 2 deletions
@@ -316,8 +316,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
 
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

vllm/model_executor/models/granite.py

Lines changed: 1 addition & 2 deletions
@@ -475,8 +475,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

vllm/model_executor/models/llama.py

Lines changed: 3 additions & 4 deletions
@@ -105,9 +105,9 @@ def __init__(self,
                  max_position_embeddings: int = 8192,
                  quant_config: Optional[QuantizationConfig] = None,
                  bias: bool = False,
+                 bias_o_proj: bool = False,
                  cache_config: Optional[CacheConfig] = None,
-                 prefix: str = "",
-                 bias_o_proj: bool = False) -> None:
+                 prefix: str = "") -> None:
         super().__init__()
         layer_idx = extract_layer_index(prefix)
         self.hidden_size = hidden_size
@@ -397,8 +397,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
             continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)
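
Beyond the comment fix, the first llama.py hunk moves bias_o_proj so it sits next to the related bias flag. Reordering defaulted parameters like this is behavior-preserving only when call sites pass them by keyword; a positional caller would silently bind the wrong values. A tiny illustration with a hypothetical class (not vLLM's):

    class Attention:
        def __init__(self,
                     bias: bool = False,
                     bias_o_proj: bool = False,
                     prefix: str = "") -> None:
            self.bias = bias
            self.bias_o_proj = bias_o_proj
            self.prefix = prefix

    # Keyword call sites are unaffected by parameter order:
    attn = Attention(prefix="model.layers.0.self_attn", bias=True)

    # A positional call binds by position, so Attention(True, "x")
    # would silently set bias_o_proj="x"; keyword-only usage is what
    # makes this kind of signature cleanup safe.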

vllm/model_executor/models/mixtral.py

Lines changed: 1 addition & 2 deletions
@@ -431,8 +431,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
 
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

vllm/model_executor/models/mllama.py

Lines changed: 1 addition & 2 deletions
@@ -1432,8 +1432,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
             loaded_weight = loaded_weight.view(loaded_weight.shape[0], -1)
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

vllm/model_executor/models/nemotron.py

Lines changed: 1 addition & 2 deletions
@@ -492,8 +492,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

vllm/model_executor/models/phimoe.py

Lines changed: 1 addition & 2 deletions
@@ -626,8 +626,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
 
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

vllm/model_executor/models/qwen2.py

Lines changed: 1 addition & 2 deletions
@@ -367,8 +367,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)

vllm/model_executor/models/solar.py

Lines changed: 1 addition & 2 deletions
@@ -492,8 +492,7 @@ def load_weights(self, weights: Iterable[Tuple[str,
                 continue
             if (self.quant_config is not None and
                     (scale_name := self.quant_config.get_cache_scale(name))):
-                # Loading kv cache scales for quark and
-                # compressed-tensors quantization
+                # Loading kv cache quantization scales
                 param = params_dict[scale_name]
                 weight_loader = getattr(param, "weight_loader",
                                         default_weight_loader)
