Commit fa41a91

fix quantlora bug (#10562)

1 parent: aa9d8d5

File tree

1 file changed: +4 -0 lines

paddlenlp/peft/lora/lora_model.py

@@ -607,8 +607,12 @@ def _find_and_replace_module(self, model, module_name, lora_config):
             lora_module = QuantizationLoRALinear(module, lora_config)
         elif isinstance(module, ColumnParallelQuantizationLinear):
             lora_module = ColumnParallelQuantizationLoRALinear(module, lora_config)
+            # LoRA column parallel will split the lora_B matrix
+            self.add_lora_split_mapping(module_name + ".lora_B", is_column=True)
         elif isinstance(module, RowParallelQuantizationLinear):
             lora_module = RowParallelQuantizationLoRALinear(module, lora_config)
+            # LoRA row parallel will split the lora_A matrix
+            self.add_lora_split_mapping(module_name + ".lora_A", is_column=False)
         if lora_module is None:
             raise ValueError(
                 f"LoRA strategy only supports paddle.nn.Linear or paddle.distributed.fleet.meta_parallel.ColumnParallelLinear or paddlenlp.transformers.sequence_utils. {module}({module_name} {type(module).__name__}) is not supported."
