Skip to content

Commit 001e63a

Browse files
Update comment
Signed-off-by: Alex-Brooks <[email protected]>
1 parent b4bc64a commit 001e63a

File tree

1 file changed

+3
-4
lines changed

1 file changed

+3
-4
lines changed

vllm/lora/utils.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ def parse_fine_tuned_lora_name(
114114
is_bias whether the tensor is lora bias.
115115
"""
116116

117-
# LoRA weight qualified name usually start with `base_model.model.`,
117+
# LoRA weight qualified name usually starts with `base_model.model.`,
118118
# so we remove the prefix `base_model.model.` to make the following
119119
# mapping work correctly.
120120
if "base_model.model." in name:
@@ -123,9 +123,8 @@ def parse_fine_tuned_lora_name(
123123
# recover the prefix `base_model.model.`
124124
name = "base_model.model." + name
125125

126-
# In some situations, we may not start with `base_model.model.`, depending
127-
# on if the model is intended to be loaded through the transformers peft
128-
# integration; if it's the latter, we should take the whole prefix.
126+
# In some situations, we may not start with `base_model.model.`.
127+
# If we don't, we should keep the prefix intact.
129128
start_index = 2 if "base_model.model." in name else 0
130129

131130
parts = name.split(".")

0 commit comments

Comments
 (0)