 from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.backend.ip_adapter.ip_adapter import IPAdapter
-from invokeai.backend.ip_adapter.unet_patcher import apply_ip_adapter_attention
+from invokeai.backend.ip_adapter.unet_patcher import Scales, apply_ip_adapter_attention
 from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningData

 from ..util import auto_detect_slice_size, normalize_device
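
The newly imported Scales is used later in this diff as the value yielded by apply_ip_adapter_attention and as the type of the new attn_ctx parameter on step(): one mutable weight per active IP-Adapter. Its definition in unet_patcher.py is not part of this diff, so the following is only a minimal sketch, assuming a thin wrapper over a list of floats that is consistent with the attn_ctx.scales[i] = ... assignments below.

# Minimal sketch of a Scales-like container. Assumption: the real class in
# invokeai/backend/ip_adapter/unet_patcher.py is not shown in this diff.
class Scales:
    """One mutable weight per patched IP-Adapter, read by the attention processors."""

    def __init__(self, scales: list[float]):
        self._scales = scales

    @property
    def scales(self) -> list[float]:
        # Returning the underlying list lets callers mutate entries in place
        # (attn_ctx.scales[i] = weight) without replacing the object that the
        # patched attention processors hold a reference to.
        return self._scales
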
@@ -426,7 +426,7 @@ def generate_latents_from_embeddings(
             return latents, attention_map_saver

         if conditioning_data.extra is not None and conditioning_data.extra.wants_cross_attention_control:
-            attn_ctx = self.invokeai_diffuser.custom_attention_context(
+            attn_ctx_mgr = self.invokeai_diffuser.custom_attention_context(
                 self.invokeai_diffuser.model,
                 extra_conditioning_info=conditioning_data.extra,
                 step_count=len(self.scheduler.timesteps),
@@ -435,14 +435,14 @@ def generate_latents_from_embeddings(
         elif ip_adapter_data is not None:
             # TODO(ryand): Should we raise an exception if both custom attention and IP-Adapter attention are active?
             # As it is now, the IP-Adapter will silently be skipped.
-            attn_ctx = apply_ip_adapter_attention(
+            attn_ctx_mgr = apply_ip_adapter_attention(
                 unet=self.invokeai_diffuser.model, ip_adapters=[ipa.ip_adapter_model for ipa in ip_adapter_data]
             )
             self.use_ip_adapter = True
         else:
-            attn_ctx = nullcontext()
+            attn_ctx_mgr = nullcontext()

-        with attn_ctx:
+        with attn_ctx_mgr as attn_ctx:
             if callback is not None:
                 callback(
                     PipelineIntermediateState(
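
The rename from attn_ctx to attn_ctx_mgr distinguishes the context manager from the value it yields: in the IP-Adapter branch that value is the Scales container, while nullcontext() yields None, so the per-step scale updates further down only run when IP-Adapters are actually active. apply_ip_adapter_attention itself is not shown in this diff; a rough sketch of the shape its usage implies, under that assumption:

# Rough sketch only. Assumption: the real apply_ip_adapter_attention in
# unet_patcher.py is not part of this diff; the `with ... as attn_ctx` usage
# implies a context manager that patches the UNet's attention and yields a
# Scales object whose entries can be adjusted while the UNet stays patched.
from contextlib import contextmanager


@contextmanager
def apply_ip_adapter_attention(unet, ip_adapters):
    scales = Scales([1.0] * len(ip_adapters))
    # ... install IP-Adapter attention processors that read their weight from
    # `scales` on every forward pass ...
    try:
        yield scales
    finally:
        # ... restore the original attention processors ...
        pass
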
@@ -467,6 +467,7 @@ def generate_latents_from_embeddings(
                     control_data=control_data,
                     ip_adapter_data=ip_adapter_data,
                     t2i_adapter_data=t2i_adapter_data,
+                    attn_ctx=attn_ctx,
                 )
                 latents = step_output.prev_sample
@@ -514,6 +515,7 @@ def step(
         control_data: List[ControlNetData] = None,
         ip_adapter_data: Optional[list[IPAdapterData]] = None,
         t2i_adapter_data: Optional[list[T2IAdapterData]] = None,
+        attn_ctx: Optional[Scales] = None,
     ):
         # invokeai_diffuser has batched timesteps, but diffusers schedulers expect a single value
         timestep = t[0]
@@ -526,7 +528,7 @@ def step(

         # handle IP-Adapter
         if self.use_ip_adapter and ip_adapter_data is not None:  # somewhat redundant but logic is clearer
-            for single_ip_adapter_data in ip_adapter_data:
+            for i, single_ip_adapter_data in enumerate(ip_adapter_data):
                 first_adapter_step = math.floor(single_ip_adapter_data.begin_step_percent * total_step_count)
                 last_adapter_step = math.ceil(single_ip_adapter_data.end_step_percent * total_step_count)
                 weight = (
@@ -536,10 +538,10 @@ def step(
                 )
                 if step_index >= first_adapter_step and step_index <= last_adapter_step:
                     # Only apply this IP-Adapter if the current step is within the IP-Adapter's begin/end step range.
-                    single_ip_adapter_data.ip_adapter_model.attn_weights.set_scale(weight)
+                    attn_ctx.scales[i] = weight
                 else:
                     # Otherwise, set the IP-Adapter's scale to 0, so it has no effect.
-                    single_ip_adapter_data.ip_adapter_model.attn_weights.set_scale(0.0)
+                    attn_ctx.scales[i] = 0.0

         # Handle ControlNet(s) and T2I-Adapter(s)
         down_block_additional_residuals = None
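
The gating logic itself is unchanged: each IP-Adapter only contributes between begin_step_percent and end_step_percent of the schedule, and outside that window its scale is forced to 0.0; the diff only changes where that scale is written (attn_ctx.scales[i] instead of the model's attn_weights). A standalone illustration of the gating, using a hypothetical helper introduced only for this example:

import math


def ip_adapter_scale_for_step(
    step_index: int,
    total_step_count: int,
    begin_step_percent: float,
    end_step_percent: float,
    weight: float,
) -> float:
    # Hypothetical helper mirroring the gating in step() above.
    first_adapter_step = math.floor(begin_step_percent * total_step_count)
    last_adapter_step = math.ceil(end_step_percent * total_step_count)
    if first_adapter_step <= step_index <= last_adapter_step:
        return weight
    return 0.0  # outside the configured range the IP-Adapter has no effect


# With 30 steps and a 0.0-0.5 range, the adapter is active for steps 0-15 only.
assert ip_adapter_scale_for_step(10, 30, 0.0, 0.5, 0.6) == 0.6
assert ip_adapter_scale_for_step(20, 30, 0.0, 0.5, 0.6) == 0.0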