@@ -322,7 +322,7 @@ def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict
 
         # fill attn processors
         attn_processors = {}
-        ff_layers = []
+        non_attn_lora_layers = []
 
         is_lora = all("lora" in k for k in state_dict.keys())
         is_custom_diffusion = any("custom_diffusion" in k for k in state_dict.keys())
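As a quick aside on the two flags set in this hunk: the checkpoint is treated as plain LoRA only when every key mentions "lora", and as Custom Diffusion when any key mentions "custom_diffusion". A tiny sketch of that routing, with made-up key names that are not taken from a real checkpoint:

# Illustrative only: sample keys loosely modeled on LoRA / Custom Diffusion
# checkpoints, showing how the all()/any() checks above classify a state dict.
lora_keys = [
    "down_blocks.0.attentions.0.processor.to_k_lora.down.weight",
    "mid_block.attentions.0.processor.lora.up.weight",
]
custom_diffusion_keys = [
    "down_blocks.0.attentions.0.processor.to_k_custom_diffusion.weight",
]

def classify(keys):
    is_lora = all("lora" in k for k in keys)
    is_custom_diffusion = any("custom_diffusion" in k for k in keys)
    if is_lora:
        return "lora"
    if is_custom_diffusion:
        return "custom_diffusion"
    return "regular attn processors"

print(classify(lora_keys))               # lora
print(classify(custom_diffusion_keys))   # custom_diffusion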
@@ -350,7 +350,7 @@ def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict
                 for sub_key in key.split("."):
                     attn_processor = getattr(attn_processor, sub_key)
 
-                # Process FF layers
+                # Process non-attention layers
                 if "lora.down.weight" in value_dict:
                     rank = value_dict["lora.down.weight"].shape[0]
                     hidden_size = value_dict["lora.up.weight"].shape[0]
@@ -366,7 +366,7 @@ def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict
 
                     value_dict = {k.replace("lora.", ""): v for k, v in value_dict.items()}
                     lora.load_state_dict(value_dict)
-                    ff_layers.append((attn_processor, lora))
+                    non_attn_lora_layers.append((attn_processor, lora))
                     continue
 
                 rank = value_dict["to_k_lora.down.weight"].shape[0]
@@ -428,13 +428,13 @@ def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict
 
         # set correct dtype & device
         attn_processors = {k: v.to(device=self.device, dtype=self.dtype) for k, v in attn_processors.items()}
-        ff_layers = [(t, l.to(device=self.device, dtype=self.dtype)) for t, l in ff_layers]
+        non_attn_lora_layers = [(t, l.to(device=self.device, dtype=self.dtype)) for t, l in non_attn_lora_layers]
 
         # set layers
         self.set_attn_processor(attn_processors)
 
         # set ff layers
-        for target_module, lora_layer in ff_layers:
+        for target_module, lora_layer in non_attn_lora_layers:
             if hasattr(target_module, "set_lora_layer"):
                 target_module.set_lora_layer(lora_layer)
 
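Taken together, these hunks rename the bookkeeping list (ff_layers -> non_attn_lora_layers) because the same code path now covers any non-attention module that accepts a LoRA layer, not only feed-forward blocks: the rank is read off lora.down.weight, the hidden size off lora.up.weight, the "lora." prefix is stripped before load_state_dict, and each (module, lora) pair is cast to the model's device/dtype and attached through set_lora_layer. Below is a minimal, self-contained sketch of that flow; LoRALinearLayer and LoRACompatibleLinear here are simplified stand-ins defined only for illustration, not the diffusers implementations.

# Minimal sketch of the non-attention LoRA path exercised by this diff.
import torch
import torch.nn as nn


class LoRALinearLayer(nn.Module):
    """Low-rank residual: x -> up(down(x)), added on top of the frozen base layer."""

    def __init__(self, in_features, out_features, rank):
        super().__init__()
        self.down = nn.Linear(in_features, rank, bias=False)
        self.up = nn.Linear(rank, out_features, bias=False)

    def forward(self, hidden_states):
        return self.up(self.down(hidden_states))


class LoRACompatibleLinear(nn.Linear):
    """Plain linear layer that can optionally carry a LoRA residual."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.lora_layer = None

    def set_lora_layer(self, lora_layer):
        self.lora_layer = lora_layer

    def forward(self, hidden_states):
        out = super().forward(hidden_states)
        if self.lora_layer is not None:
            out = out + self.lora_layer(hidden_states)
        return out


# value_dict as it would look after grouping a checkpoint's keys by layer.
hidden_size, rank = 32, 4
value_dict = {
    "lora.down.weight": torch.randn(rank, hidden_size),
    "lora.up.weight": torch.randn(hidden_size, rank),
}
target_module = LoRACompatibleLinear(hidden_size, hidden_size)

# Same shape-based inference as in the hunks above: rank from the down
# projection, hidden size from the up projection.
rank = value_dict["lora.down.weight"].shape[0]
hidden_size = value_dict["lora.up.weight"].shape[0]
lora = LoRALinearLayer(hidden_size, hidden_size, rank)

# Strip the "lora." prefix so the keys match the submodule names, then load.
lora.load_state_dict({k.replace("lora.", ""): v for k, v in value_dict.items()})

# Collect (module, lora) pairs, cast to the target's device/dtype, and attach
# through the set_lora_layer() hook, mirroring the final hunk.
non_attn_lora_layers = [(target_module, lora)]
for module, lora_layer in non_attn_lora_layers:
    if hasattr(module, "set_lora_layer"):
        module.set_lora_layer(
            lora_layer.to(device=module.weight.device, dtype=module.weight.dtype)
        )

sample = torch.randn(2, hidden_size)
print(target_module(sample).shape)  # torch.Size([2, 32])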