
Commit 5bbf7fe

[Bugfix] Renames in 0.15.0 diffusers (#3184)
Link to PR in the diffusers repository: huggingface/diffusers#2691

Imports: `diffusers.models.cross_attention -> diffusers.models.attention_processor`

Unions: `AttnProcessor -> AttentionProcessor`

Classes:

| Old name | New name |
| --- | --- |
| CrossAttention | Attention |
| CrossAttnProcessor | AttnProcessor |
| XFormersCrossAttnProcessor | XFormersAttnProcessor |
| CrossAttnAddedKVProcessor | AttnAddedKVProcessor |
| LoRACrossAttnProcessor | LoRAAttnProcessor |
| LoRAXFormersCrossAttnProcessor | LoRAXFormersAttnProcessor |
| FlaxCrossAttention | FlaxAttention |
| AttendExciteCrossAttnProcessor | AttendExciteAttnProcessor |
| Pix2PixZeroCrossAttnProcessor | Pix2PixZeroAttnProcessor |

Config values are also no longer set as attributes of the model object: huggingface/diffusers#2849
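Code that has to run against both layouts can bridge the renames with a small import shim. A minimal sketch, assuming only the two module layouts described above (the aliasing itself is illustrative, not part of this commit):

```python
# Hedged compatibility shim for the diffusers 0.15.0 renames.
try:
    # diffusers >= 0.15.0: new module and class names
    from diffusers.models.attention_processor import Attention, AttnProcessor
except ImportError:
    # diffusers < 0.15.0: old names, aliased to the new ones
    from diffusers.models.cross_attention import (
        CrossAttention as Attention,
        CrossAttnProcessor as AttnProcessor,
    )
```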
2 parents 6db72f8 + bfb968b commit 5bbf7fe

File tree: 5 files changed (+20 -17 lines)

.gitignore (-2)

@@ -233,5 +233,3 @@ installer/install.sh
 installer/update.bat
 installer/update.sh
 
-# no longer stored in source directory
-models

ldm/invoke/generator/diffusers_pipeline.py (+11 -4)

@@ -400,8 +400,15 @@ def device(self) -> torch.device:
     @property
     def _submodels(self) -> Sequence[torch.nn.Module]:
         module_names, _, _ = self.extract_init_dict(dict(self.config))
-        values = [getattr(self, name) for name in module_names.keys()]
-        return [m for m in values if isinstance(m, torch.nn.Module)]
+        submodels = []
+        for name in module_names.keys():
+            if hasattr(self, name):
+                value = getattr(self, name)
+            else:
+                value = getattr(self.config, name)
+            if isinstance(value, torch.nn.Module):
+                submodels.append(value)
+        return submodels
 
     def image_from_embeddings(self, latents: torch.Tensor, num_inference_steps: int,
                               conditioning_data: ConditioningData,
@@ -472,7 +479,7 @@ def generate_latents_from_embeddings(self, latents: torch.Tensor, timesteps,
                 step_count=len(self.scheduler.timesteps)
         ):
 
-            yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.num_train_timesteps,
+            yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.config.num_train_timesteps,
                                             latents=latents)
 
             batch_size = latents.shape[0]
@@ -756,7 +763,7 @@ def _tokenize(self, prompt: Union[str, List[str]]):
     @property
     def channels(self) -> int:
         """Compatible with DiffusionWrapper"""
-        return self.unet.in_channels
+        return self.unet.config.in_channels
 
     def decode_latents(self, latents):
         # Explicit call to get the vae loaded, since `decode` isn't the forward method.
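The other half of this commit tracks huggingface/diffusers#2849: pipeline components no longer mirror their config values as instance attributes, so reads go through `.config`, with `_submodels` above falling back to the config only when no real attribute exists. A minimal sketch of the pattern, using names from the hunks above (`unet`, `scheduler`, and `pipeline` are assumed to be the usual diffusers objects):

```python
# Attribute reads become config reads (names taken from the diff above):
channels = unet.config.in_channels              # was: unet.in_channels
t_train = scheduler.config.num_train_timesteps  # was: scheduler.num_train_timesteps

# The defensive fallback used by _submodels: prefer a real attribute,
# otherwise read the value from the frozen config.
value = getattr(pipeline, name) if hasattr(pipeline, name) else getattr(pipeline.config, name)
```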

ldm/models/diffusion/cross_attention_control.py (+8 -9)

@@ -14,7 +14,6 @@
 
 from compel.cross_attention_control import Arguments
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
-from diffusers.models.cross_attention import AttnProcessor
 from ldm.invoke.devices import torch_dtype
 
 
@@ -163,7 +162,7 @@ def offload_saved_attention_slices_to_cpu(self):
 
 class InvokeAICrossAttentionMixin:
     """
-    Enable InvokeAI-flavoured CrossAttention calculation, which does aggressive low-memory slicing and calls
+    Enable InvokeAI-flavoured Attention calculation, which does aggressive low-memory slicing and calls
     through both to an attention_slice_wrangler and a slicing_strategy_getter for custom attention map wrangling
     and dymamic slicing strategy selection.
     """
@@ -178,7 +177,7 @@ def set_attention_slice_wrangler(self, wrangler: Optional[Callable[[nn.Module, t
        Set custom attention calculator to be called when attention is calculated
        :param wrangler: Callback, with args (module, suggested_attention_slice, dim, offset, slice_size),
                         which returns either the suggested_attention_slice or an adjusted equivalent.
-                        `module` is the current CrossAttention module for which the callback is being invoked.
+                        `module` is the current Attention module for which the callback is being invoked.
                         `suggested_attention_slice` is the default-calculated attention slice
                         `dim` is -1 if the attenion map has not been sliced, or 0 or 1 for dimension-0 or dimension-1 slicing.
                         If `dim` is >= 0, `offset` and `slice_size` specify the slice start and length.
@@ -326,7 +325,7 @@ def setup_cross_attention_control_attention_processors(unet: UNet2DConditionMode
 
 
 def get_cross_attention_modules(model, which: CrossAttentionType) -> list[tuple[str, InvokeAICrossAttentionMixin]]:
-    from ldm.modules.attention import CrossAttention # avoid circular import
+    from ldm.modules.attention import CrossAttention # avoid circular import # TODO: rename as in diffusers?
     cross_attention_class: type = InvokeAIDiffusersCrossAttention if isinstance(model,UNet2DConditionModel) else CrossAttention
     which_attn = "attn1" if which is CrossAttentionType.SELF else "attn2"
     attention_module_tuples = [(name,module) for name, module in model.named_modules() if
@@ -432,7 +431,7 @@ def get_mem_free_total(device):
 
 
 
-class InvokeAIDiffusersCrossAttention(diffusers.models.attention.CrossAttention, InvokeAICrossAttentionMixin):
+class InvokeAIDiffusersCrossAttention(diffusers.models.attention.Attention, InvokeAICrossAttentionMixin):
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -457,8 +456,8 @@ def _attention(self, query, key, value, attention_mask=None):
     """
     # base implementation
 
-    class CrossAttnProcessor:
-        def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None):
+    class AttnProcessor:
+        def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
             batch_size, sequence_length, _ = hidden_states.shape
             attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length)
@@ -487,7 +486,7 @@ def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=No
 
 import torch
 
-from diffusers.models.cross_attention import CrossAttention, CrossAttnProcessor, SlicedAttnProcessor
+from diffusers.models.attention_processor import Attention, AttnProcessor, SlicedAttnProcessor
 
 
 @dataclass
@@ -532,7 +531,7 @@ class SlicedSwapCrossAttnProcesser(SlicedAttnProcessor):
 
     # TODO: dynamically pick slice size based on memory conditions
 
-    def __call__(self, attn: CrossAttention, hidden_states, encoder_hidden_states=None, attention_mask=None,
+    def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None,
                  # kwargs
                  swap_cross_attn_context: SwapCrossAttnContext=None):
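Under the new names, installing a processor on a UNet is unchanged apart from the import path. A hedged usage sketch (the `unet` instance, a loaded `UNet2DConditionModel`, is assumed rather than constructed here):

```python
from diffusers.models.attention_processor import AttnProcessor

# Restore the default processor on every attention block; a custom class
# such as SlicedSwapCrossAttnProcesser above can be passed the same way.
unet.set_attn_processor(AttnProcessor())
```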

ldm/models/diffusion/shared_invokeai_diffusion.py (-1)

@@ -5,7 +5,6 @@
 
 import numpy as np
 import torch
-
 from diffusers import UNet2DConditionModel
 from typing_extensions import TypeAlias

pyproject.toml (+1 -1)

@@ -34,7 +34,7 @@ dependencies = [
     "clip_anytorch",
     "compel~=1.1.0",
     "datasets",
-    "diffusers[torch]==0.14",
+    "diffusers[torch]~=0.15.0",
     "dnspython==2.2.1",
     "einops",
     "eventlet",
