@@ -114,7 +114,7 @@ def parse_fine_tuned_lora_name(
            is_bias whether the tensor is lora bias.
     """

-    # LoRA weight qualified name always starts with `base_model.model.`,
+    # LoRA weight qualified name usually starts with `base_model.model.`,
     # so we remove the prefix `base_model.model.` to make the following
     # mapping correctly.
     if "base_model.model." in name:
@@ -123,18 +123,23 @@ def parse_fine_tuned_lora_name(
         # recover the prefix `base_model.model.`
         name = "base_model.model." + name

+    # In some situations, we may not start with `base_model.model.`.
+    # If we don't (e.g., ibm-granite/granite-speech-3.3-8b),
+    # we should keep the prefix intact.
+    start_index = 2 if "base_model.model." in name else 0
+
     parts = name.split(".")
     if parts[-1] == "weight" and (parts[-2] == "lora_A"
                                   or parts[-2] == "lora_B"):
-        new_name = ".".join(parts[2:-2])
+        new_name = ".".join(parts[start_index:-2])
         return new_name, parts[-2] == "lora_A", False

     if parts[-1] == "lora_embedding_A" or parts[-1] == "lora_embedding_B":
-        new_name = ".".join(parts[2:-1])
+        new_name = ".".join(parts[start_index:-1])
         return new_name, parts[-1] == "lora_embedding_A", False

     if parts[-1] == "bias":
-        new_name = ".".join(parts[2:-2])
+        new_name = ".".join(parts[start_index:-2])
         return new_name, False, True

     raise ValueError(f"{name} is unsupported LoRA weight")
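For reference, below is a minimal standalone sketch of the parsing logic after this change. `parse_lora_name` and the example weight names are hypothetical stand-ins: the real `parse_fine_tuned_lora_name` has a larger signature and also runs the prefix-recovery branch shown as context above, both of which are omitted here.

# Hypothetical standalone sketch of the post-patch parsing logic;
# not the actual vLLM API.
def parse_lora_name(name: str) -> tuple[str, bool, bool]:
    # Skip the first two dotted components only when the optional
    # `base_model.model.` prefix is actually present.
    start_index = 2 if "base_model.model." in name else 0
    parts = name.split(".")
    if parts[-1] == "weight" and parts[-2] in ("lora_A", "lora_B"):
        return ".".join(parts[start_index:-2]), parts[-2] == "lora_A", False
    if parts[-1] in ("lora_embedding_A", "lora_embedding_B"):
        return (".".join(parts[start_index:-1]),
                parts[-1] == "lora_embedding_A", False)
    if parts[-1] == "bias":
        return ".".join(parts[start_index:-2]), False, True
    raise ValueError(f"{name} is unsupported LoRA weight")

# Prefixed names still resolve to the bare module path, as before:
assert parse_lora_name("base_model.model.layers.0.q_proj.lora_A.weight") == (
    "layers.0.q_proj", True, False)
# Unprefixed names (e.g. granite-speech-style adapters) now keep their
# full path instead of losing their first two components:
assert parse_lora_name("encoder.blocks.0.attn.lora_B.weight") == (
    "encoder.blocks.0.attn", False, False)

Computing `start_index` once keeps all three branches consistent: adapters exported with PEFT's `base_model.model.` wrapper parse exactly as before, while adapters whose keys lack the prefix no longer have their first two path components silently stripped.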