
Commit 4f5e3e3

The RunwayML paths for v1.5 have changed to stable-diffusion-v1-5/[stable-diffusion-v1-5, stable-diffusion-inpainting] (#10476)

* Update pipeline_controlnet.py
* Update pipeline_controlnet_img2img.py: after the RunwayML take-down, change all references to stable-diffusion-v1-5/stable-diffusion-v1-5
* Update pipeline_controlnet_inpaint.py
* RunwayML take-down: change references to sd-legacy
* Update convert_blipdiffusion_to_diffusers.py: style change
1 parent 8f2253c commit 4f5e3e3

37 files changed (+87 -88 lines)

Diff for: examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py (+1 -1)

@@ -160,7 +160,7 @@ def save_model_card(
 from diffusers import AutoPipelineForText2Image
 import torch
 {diffusers_imports_pivotal}
-pipeline = AutoPipelineForText2Image.from_pretrained('runwayml/stable-diffusion-v1-5', torch_dtype=torch.float16).to('cuda')
+pipeline = AutoPipelineForText2Image.from_pretrained('stable-diffusion-v1-5/stable-diffusion-v1-5', torch_dtype=torch.float16).to('cuda')
 pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors')
 {diffusers_example_pivotal}
 image = pipeline('{validation_prompt if validation_prompt else instance_prompt}').images[0]
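
Read back from the template, the rendered model card now instructs users roughly as in the sketch below; the LoRA repo id and the prompt are hypothetical placeholders, not values from this diff.

# A minimal sketch of the updated model-card usage; repo id and prompt are placeholders.
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights("your-username/your-lora", weight_name="pytorch_lora_weights.safetensors")
image = pipeline("a photo of sks dog").images[0]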

Diff for: scripts/convert_blipdiffusion_to_diffusers.py (+4 -5)

@@ -303,10 +303,9 @@ def save_blip_diffusion_model(model, args):
     qformer = get_qformer(model)
     qformer.eval()

-    text_encoder = ContextCLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder")
-    vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
-
-    unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
+    text_encoder = ContextCLIPTextModel.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="text_encoder")
+    vae = AutoencoderKL.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="vae")
+    unet = UNet2DConditionModel.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet")
     vae.eval()
     text_encoder.eval()
     scheduler = PNDMScheduler(
@@ -316,7 +315,7 @@ def save_blip_diffusion_model(model, args):
         set_alpha_to_one=False,
         skip_prk_steps=True,
     )
-    tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer")
+    tokenizer = CLIPTokenizer.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="tokenizer")
     image_processor = BlipImageProcessor()
     blip_diffusion = BlipDiffusionPipeline(
         tokenizer=tokenizer,
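
The same subfolder pattern loads any single SD 1.5 component from the relocated repo. A minimal sketch, assuming only that diffusers and transformers are installed:

# Load individual components from subfolders of the renamed repo.
from diffusers import AutoencoderKL, UNet2DConditionModel
from transformers import CLIPTokenizer

repo = "stable-diffusion-v1-5/stable-diffusion-v1-5"
vae = AutoencoderKL.from_pretrained(repo, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(repo, subfolder="unet")
tokenizer = CLIPTokenizer.from_pretrained(repo, subfolder="tokenizer")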

Diff for: src/diffusers/loaders/single_file.py (+1 -1)

@@ -329,7 +329,7 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):

 >>> # Enable float16 and move to GPU
 >>> pipeline = StableDiffusionPipeline.from_single_file(
-...     "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
+...     "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
 ...     torch_dtype=torch.float16,
 ... )
 >>> pipeline.to("cuda")
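
An equivalent local-file route, shown here as a sketch rather than part of this diff, downloads the checkpoint first and hands the path to from_single_file; it assumes huggingface_hub is installed alongside diffusers.

# Sketch: download the single-file checkpoint, then load it from disk.
from huggingface_hub import hf_hub_download
from diffusers import StableDiffusionPipeline

ckpt_path = hf_hub_download("stable-diffusion-v1-5/stable-diffusion-v1-5", "v1-5-pruned-emaonly.ckpt")
pipeline = StableDiffusionPipeline.from_single_file(ckpt_path)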

Diff for: src/diffusers/loaders/textual_inversion.py (+3 -3)

@@ -333,7 +333,7 @@ def load_textual_inversion(
 from diffusers import StableDiffusionPipeline
 import torch

-model_id = "runwayml/stable-diffusion-v1-5"
+model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

 pipe.load_textual_inversion("sd-concepts-library/cat-toy")
@@ -352,7 +352,7 @@ def load_textual_inversion(
 from diffusers import StableDiffusionPipeline
 import torch

-model_id = "runwayml/stable-diffusion-v1-5"
+model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

 pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")
@@ -469,7 +469,7 @@ def unload_textual_inversion(
 from diffusers import AutoPipelineForText2Image
 import torch

-pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5")
+pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")

 # Example 1
 pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork")
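
Putting the load_textual_inversion and unload_textual_inversion docstrings together, a minimal round-trip sketch against the new repo id; the prompt is a placeholder.

# Sketch: load a concept embedding, use it, then unload it by token.
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork")
image = pipeline("a bridge in the style of <gta5-artwork>").images[0]
pipeline.unload_textual_inversion("<gta5-artwork>")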

Diff for: src/diffusers/models/autoencoders/consistency_decoder_vae.py (+1 -1)

@@ -60,7 +60,7 @@ class ConsistencyDecoderVAE(ModelMixin, ConfigMixin):

 >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
 >>> pipe = StableDiffusionPipeline.from_pretrained(
-...     "runwayml/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
+...     "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
 ... ).to("cuda")

 >>> image = pipe("horse", generator=torch.manual_seed(0)).images[0]
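
The vae= argument shown above accepts any compatible autoencoder. Purely as an illustration of the override pattern, this sketch reloads the repo's own VAE explicitly and passes it in:

# Sketch: explicit VAE override, mirroring the docstring pattern above.
from diffusers import AutoencoderKL, StableDiffusionPipeline

repo = "stable-diffusion-v1-5/stable-diffusion-v1-5"
vae = AutoencoderKL.from_pretrained(repo, subfolder="vae")
pipe = StableDiffusionPipeline.from_pretrained(repo, vae=vae)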

Diff for: src/diffusers/pipelines/auto_pipeline.py (+8 -8)

@@ -293,7 +293,7 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs):
 If you get the error message below, you need to finetune the weights for your downstream task:

 ```
-Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
+Some weights of UNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
 - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
 You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
 ```
@@ -385,7 +385,7 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs):
 ```py
 >>> from diffusers import AutoPipelineForText2Image

->>> pipeline = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5")
+>>> pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
 >>> image = pipeline(prompt).images[0]
 ```
 """
@@ -448,7 +448,7 @@ def from_pipe(cls, pipeline, **kwargs):
 >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image

 >>> pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
-...     "runwayml/stable-diffusion-v1-5", requires_safety_checker=False
+...     "stable-diffusion-v1-5/stable-diffusion-v1-5", requires_safety_checker=False
 ... )

 >>> pipe_t2i = AutoPipelineForText2Image.from_pipe(pipe_i2i)
@@ -589,7 +589,7 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs):
 If you get the error message below, you need to finetune the weights for your downstream task:

 ```
-Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
+Some weights of UNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
 - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
 You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
 ```
@@ -681,7 +681,7 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs):
 ```py
 >>> from diffusers import AutoPipelineForImage2Image

->>> pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5")
+>>> pipeline = AutoPipelineForImage2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
 >>> image = pipeline(prompt, image).images[0]
 ```
 """
@@ -756,7 +756,7 @@ def from_pipe(cls, pipeline, **kwargs):
 >>> from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image

 >>> pipe_t2i = AutoPipelineForText2Image.from_pretrained(
-...     "runwayml/stable-diffusion-v1-5", requires_safety_checker=False
+...     "stable-diffusion-v1-5/stable-diffusion-v1-5", requires_safety_checker=False
 ... )

 >>> pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe_t2i)
@@ -900,7 +900,7 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs):
 If you get the error message below, you need to finetune the weights for your downstream task:

 ```
-Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
+Some weights of UNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
 - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
 You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
 ```
@@ -992,7 +992,7 @@ def from_pretrained(cls, pretrained_model_or_path, **kwargs):
 ```py
 >>> from diffusers import AutoPipelineForInpainting

->>> pipeline = AutoPipelineForInpainting.from_pretrained("runwayml/stable-diffusion-v1-5")
+>>> pipeline = AutoPipelineForInpainting.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
 >>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0]
 ```
 """

Diff for: src/diffusers/pipelines/controlnet/pipeline_controlnet.py (+2 -2)

@@ -80,7 +80,7 @@
 >>> # load control net and stable diffusion v1-5
 >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
 >>> pipe = StableDiffusionControlNetPipeline.from_pretrained(
-...     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+...     "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
 ... )

 >>> # speed up diffusion process with faster scheduler and memory optimization
@@ -198,7 +198,7 @@ class StableDiffusionControlNetPipeline(
         [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
     safety_checker ([`StableDiffusionSafetyChecker`]):
         Classification module that estimates whether generated images could be considered offensive or harmful.
-        Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+        Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
         about a model's potential harms.
     feature_extractor ([`~transformers.CLIPImageProcessor`]):
         A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
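
As a usage aside, the Canny condition image for the docstring example above is typically prepared as in this sketch; it assumes opencv-python, numpy, and Pillow are installed, and input.png is a placeholder file.

# Sketch: turn a photo into the edge map expected by sd-controlnet-canny.
import cv2
import numpy as np
from PIL import Image

image = np.array(Image.open("input.png").convert("RGB"))
edges = cv2.Canny(image, 100, 200)  # low/high hysteresis thresholds
canny_image = Image.fromarray(np.stack([edges] * 3, axis=-1))  # 1 channel -> 3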

Diff for: src/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py (+2 -2)

@@ -71,7 +71,7 @@
 >>> # load control net and stable diffusion v1-5
 >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
 >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-...     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+...     "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
 ... )

 >>> # speed up diffusion process with faster scheduler and memory optimization
@@ -168,7 +168,7 @@ class StableDiffusionControlNetImg2ImgPipeline(
         [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
     safety_checker ([`StableDiffusionSafetyChecker`]):
         Classification module that estimates whether generated images could be considered offensive or harmful.
-        Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+        Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
         about a model's potential harms.
     feature_extractor ([`~transformers.CLIPImageProcessor`]):
         A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
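
For orientation, a sketch of invoking the img2img variant once it is built as above; pipe is assumed to be the pipeline from the docstring, and the image files are placeholders.

# Sketch: img2img with ControlNet takes both an init image and a control image;
# strength controls how far the result may drift from the init image.
from PIL import Image

init_image = Image.open("init.png").convert("RGB")    # placeholder
canny_image = Image.open("canny.png").convert("RGB")  # placeholder
result = pipe(
    "a futuristic city at dusk",
    image=init_image,
    control_image=canny_image,
    strength=0.8,
).images[0]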

Diff for: src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py (+4 -4)

@@ -83,7 +83,7 @@
 ...     "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
 ... )
 >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-...     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+...     "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
 ... )

 >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
@@ -141,9 +141,9 @@ class StableDiffusionControlNetInpaintPipeline(
 <Tip>

 This pipeline can be used with checkpoints that have been specifically fine-tuned for inpainting
-([runwayml/stable-diffusion-inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting)) as well as
+([stable-diffusion-v1-5/stable-diffusion-inpainting](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting)) as well as
 default text-to-image Stable Diffusion checkpoints
-([runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5)). Default text-to-image
+([stable-diffusion-v1-5/stable-diffusion-v1-5](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5)). Default text-to-image
 Stable Diffusion checkpoints might be preferable for ControlNets that have been fine-tuned on those, such as
 [lllyasviel/control_v11p_sd15_inpaint](https://huggingface.co/lllyasviel/control_v11p_sd15_inpaint).

@@ -167,7 +167,7 @@ class StableDiffusionControlNetInpaintPipeline(
         [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
     safety_checker ([`StableDiffusionSafetyChecker`]):
         Classification module that estimates whether generated images could be considered offensive or harmful.
-        Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+        Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
         about a model's potential harms.
     feature_extractor ([`~transformers.CLIPImageProcessor`]):
         A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
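
The inpaint pipeline expects a control image that encodes the mask. A common recipe, reproduced here as a sketch: masked pixels are flagged with -1.0 so the inpaint ControlNet knows which regions to repaint.

# Sketch: build the inpaint control image by flagging masked pixels with -1.0.
import numpy as np
import torch
from PIL import Image

def make_inpaint_condition(image: Image.Image, mask: Image.Image) -> torch.Tensor:
    img = np.array(image.convert("RGB")).astype(np.float32) / 255.0
    m = np.array(mask.convert("L")).astype(np.float32) / 255.0
    img[m > 0.5] = -1.0  # mark pixels to repaint
    return torch.from_numpy(img[None].transpose(0, 3, 1, 2))  # NCHW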

Diff for: src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint_sd_xl.py (+1 -1)

@@ -1622,7 +1622,7 @@ def denoising_value_valid(dnv):

 # 8. Check that sizes of mask, masked image and latents match
 if num_channels_unet == 9:
-    # default case for runwayml/stable-diffusion-inpainting
+    # default case for stable-diffusion-v1-5/stable-diffusion-inpainting
     num_channels_mask = mask.shape[1]
     num_channels_masked_image = masked_image_latents.shape[1]
     if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
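
The arithmetic behind that check, spelled out: an inpainting UNet's conv_in sees the noise latents concatenated with the downsampled mask and the masked-image latents.

# Sketch: why the inpainting UNet expects 9 input channels.
num_channels_latents = 4       # VAE latent channels
num_channels_mask = 1          # binary mask at latent resolution
num_channels_masked_image = 4  # VAE latents of the masked image
assert num_channels_latents + num_channels_mask + num_channels_masked_image == 9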

Diff for: src/diffusers/pipelines/controlnet/pipeline_flax_controlnet.py (+2 -2)

@@ -75,7 +75,7 @@
 ...     "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
 ... )
 >>> pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
-...     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32
+...     "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32
 ... )
 >>> params["controlnet"] = controlnet_params

@@ -132,7 +132,7 @@ class FlaxStableDiffusionControlNetPipeline(FlaxDiffusionPipeline):
         [`FlaxDPMSolverMultistepScheduler`].
     safety_checker ([`FlaxStableDiffusionSafetyChecker`]):
         Classification module that estimates whether generated images could be considered offensive or harmful.
-        Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
+        Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
         about a model's potential harms.
     feature_extractor ([`~transformers.CLIPImageProcessor`]):
         A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
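
For context, Flax pipelines return params separately because inference is usually pmapped across devices. A sketch of the standard replicate/shard pattern, assuming pipe, params, and the tokenized prompt_ids/processed_image come from the example above; this is the usual JAX data-parallel setup, not code from this diff.

import jax
from flax.jax_utils import replicate
from flax.training.common_utils import shard

p_params = replicate(params)  # one parameter copy per device
rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
prompt_ids = shard(prompt_ids)            # split the batch across devices
processed_image = shard(processed_image)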

Diff for: src/diffusers/pipelines/pipeline_flax_utils.py (+5 -5)

@@ -237,14 +237,14 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
 If you get the error message below, you need to finetune the weights for your downstream task:

 ```
-Some weights of FlaxUNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
+Some weights of FlaxUNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
 ```

 Parameters:
     pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
         Can be either:

-            - A string, the *repo id* (for example `runwayml/stable-diffusion-v1-5`) of a pretrained pipeline
+            - A string, the *repo id* (for example `stable-diffusion-v1-5/stable-diffusion-v1-5`) of a pretrained pipeline
               hosted on the Hub.
             - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
               using [`~FlaxDiffusionPipeline.save_pretrained`].
@@ -293,15 +293,15 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
 >>> # Requires to be logged in to Hugging Face hub,
 >>> # see more in [the documentation](https://huggingface.co/docs/hub/security-tokens)
 >>> pipeline, params = FlaxDiffusionPipeline.from_pretrained(
-...     "runwayml/stable-diffusion-v1-5",
+...     "stable-diffusion-v1-5/stable-diffusion-v1-5",
 ...     variant="bf16",
 ...     dtype=jnp.bfloat16,
 ... )

 >>> # Download pipeline, but use a different scheduler
 >>> from diffusers import FlaxDPMSolverMultistepScheduler

->>> model_id = "runwayml/stable-diffusion-v1-5"
+>>> model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 >>> dpmpp, dpmpp_state = FlaxDPMSolverMultistepScheduler.from_pretrained(
 ...     model_id,
 ...     subfolder="scheduler",
@@ -559,7 +559,7 @@ def components(self) -> Dict[str, Any]:
 ... )

 >>> text2img = FlaxStableDiffusionPipeline.from_pretrained(
-...     "runwayml/stable-diffusion-v1-5", variant="bf16", dtype=jnp.bfloat16
+...     "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="bf16", dtype=jnp.bfloat16
 ... )
 >>> img2img = FlaxStableDiffusionImg2ImgPipeline(**text2img.components)
 ```
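
The components-reuse idiom in that last hunk deserves a note: it builds the img2img pipeline from the text2img one without reloading any weights. A self-contained sketch, assuming a JAX/Flax environment:

# Sketch: share weights between Flax pipelines via .components.
import jax.numpy as jnp
from diffusers import FlaxStableDiffusionImg2ImgPipeline, FlaxStableDiffusionPipeline

text2img, params = FlaxStableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="bf16", dtype=jnp.bfloat16
)
img2img = FlaxStableDiffusionImg2ImgPipeline(**text2img.components)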

Diff for: src/diffusers/pipelines/pipeline_loading_utils.py (+2 -2)

@@ -813,9 +813,9 @@ def _maybe_raise_warning_for_inpainting(pipeline_class, pretrained_model_name_or
     "You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the"
     f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For"
     " better inpainting results, we strongly suggest using Stable Diffusion's official inpainting"
-    " checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your"
+    " checkpoint: https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting instead or adapting your"
     f" checkpoint {pretrained_model_name_or_path} to the format of"
-    " https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain"
+    " https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting. Note that we do not actively maintain"
     " the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0."
 )
 deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False)
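
The non-deprecated route that warning recommends, as a minimal sketch: load the official inpainting checkpoint from its new location instead of a legacy 4-channel checkpoint.

# Sketch: the recommended replacement for the legacy inpainting path.
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-inpainting"
)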
