1 parent b4bc64a commit e919ee9
vllm/lora/utils.py
@@ -123,9 +123,8 @@ def parse_fine_tuned_lora_name(
     # recover the prefix `base_model.model.`
     name = "base_model.model." + name
 
-    # In some situations, we may not start with `base_model.model.`, depending
-    # on if the model is intended to be loaded through the transformers peft
-    # integration; if it's the latter, we should take the whole prefix.
+    # In some situations, we may not start with `base_model.model.`.
+    # If we don't, we should keep the prefix intact.
     start_index = 2 if "base_model.model." in name else 0
 
     parts = name.split(".")