@@ -18,6 +18,7 @@
 
 import numpy as np
 import pytest
+from huggingface_hub import hf_hub_download
 
 from diffusers import BitsAndBytesConfig, DiffusionPipeline, FluxTransformer2DModel, SD3Transformer2DModel, logging
 from diffusers.utils import is_accelerate_version
@@ -30,6 +31,7 @@
     numpy_cosine_similarity_distance,
     require_accelerate,
     require_bitsandbytes_version_greater,
+    require_peft_version_greater,
     require_torch,
     require_torch_gpu,
     require_transformers_version_greater,
@@ -509,6 +511,29 @@ def test_quality(self):
         max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice)
         self.assertTrue(max_diff < 1e-3)
 
+    @require_peft_version_greater("0.14.0")
+    def test_lora_loading(self):
+        self.pipeline_8bit.load_lora_weights(
+            hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"), adapter_name="hyper-sd"
+        )
+        self.pipeline_8bit.set_adapters("hyper-sd", adapter_weights=0.125)
+
+        output = self.pipeline_8bit(
+            prompt=self.prompt,
+            height=256,
+            width=256,
+            max_sequence_length=64,
+            output_type="np",
+            num_inference_steps=8,
+            generator=torch.manual_seed(42),
+        ).images
+        out_slice = output[0, -3:, -3:, -1].flatten()
+
+        expected_slice = np.array([0.3916, 0.3916, 0.3887, 0.4243, 0.4155, 0.4233, 0.4570, 0.4531, 0.4248])
+
+        max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice)
+        self.assertTrue(max_diff < 1e-3)
+
 
 @slow
 class BaseBnb8bitSerializationTests(Base8bitTests):
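For context, `self.pipeline_8bit` and `self.prompt` come from the shared `Base8bitTests` setup, which lies outside these hunks. A minimal sketch of how such an 8-bit pipeline could be assembled and the new LoRA path exercised (requires peft >= 0.14.0, per the decorator above); the model id, dtype, CPU-offload call, and prompt below are assumptions for illustration, not taken from this diff:

    import torch
    from huggingface_hub import hf_hub_download

    from diffusers import BitsAndBytesConfig, DiffusionPipeline, FluxTransformer2DModel

    model_id = "black-forest-labs/FLUX.1-dev"  # assumed checkpoint

    # Quantize only the transformer to 8-bit with bitsandbytes.
    quant_config = BitsAndBytesConfig(load_in_8bit=True)
    transformer = FluxTransformer2DModel.from_pretrained(
        model_id, subfolder="transformer", quantization_config=quant_config, torch_dtype=torch.float16
    )
    pipeline_8bit = DiffusionPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.float16)
    pipeline_8bit.enable_model_cpu_offload()

    # Same LoRA flow as the new test: download the Hyper-SD adapter and scale it to 0.125.
    pipeline_8bit.load_lora_weights(
        hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"), adapter_name="hyper-sd"
    )
    pipeline_8bit.set_adapters("hyper-sd", adapter_weights=0.125)

    # Example prompt; the test uses the fixture's self.prompt instead.
    image = pipeline_8bit(
        "a photo of an astronaut riding a horse", num_inference_steps=8, generator=torch.manual_seed(42)
    ).images[0]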