From feccd5446f4c8784c2f096cd11608dd49f36c64f Mon Sep 17 00:00:00 2001
From: Aryan
Date: Sat, 4 Jan 2025 23:46:34 +0100
Subject: [PATCH 1/2] fix

---
 src/diffusers/models/transformers/transformer_hunyuan_video.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/diffusers/models/transformers/transformer_hunyuan_video.py b/src/diffusers/models/transformers/transformer_hunyuan_video.py
index e3f24d97f3fa..6cb97af93652 100644
--- a/src/diffusers/models/transformers/transformer_hunyuan_video.py
+++ b/src/diffusers/models/transformers/transformer_hunyuan_video.py
@@ -721,6 +721,7 @@ def forward(
 
         for i in range(batch_size):
             attention_mask[i, : effective_sequence_length[i], : effective_sequence_length[i]] = True
+        attention_mask = attention_mask.unsqueeze(1)  # [B, 1, N, N], for broadcasting across attention heads
 
         # 4. Transformer blocks
         if torch.is_grad_enabled() and self.gradient_checkpointing:

From 6840c552cb064406de3e33d973e51605fb629117 Mon Sep 17 00:00:00 2001
From: Aryan
Date: Sat, 4 Jan 2025 23:53:44 +0100
Subject: [PATCH 2/2] add coauthor

Co-Authored-By: Nerogar