
Commit 66bc6a5

closer.. fixing circular imports

1 parent 9e514b3 · commit 66bc6a5
4 files changed: +17 -8 lines
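All four diffs apply the same pattern: an import that participates in an import cycle is dropped from module scope and redone inside the function that actually needs it, so it resolves at call time, once every module has finished loading. Below is a minimal, self-contained sketch of why that helps; the throwaway modules mod_a and mod_b and the temp-directory setup are illustrative only and are not part of this repository.

# Sketch of the deferred-import fix for a circular import.
# mod_a / mod_b are hypothetical modules written to a temp directory.
import importlib
import pathlib
import sys
import tempfile

pkg = pathlib.Path(tempfile.mkdtemp())
sys.path.insert(0, str(pkg))

(pkg / "mod_a.py").write_text(
    "from mod_b import helper\n"      # module-level import of mod_b
    "def work():\n"
    "    return helper()\n"
)
(pkg / "mod_b.py").write_text(
    "from mod_a import work\n"        # module-level import back into mod_a -> cycle
    "def helper():\n"
    "    return 'ok'\n"
)

try:
    import mod_a                      # fails: mod_a is still only partially initialised
except ImportError as err:            # "cannot import name 'work' ... circular import"
    print("broken:", err)

# The fix used throughout this commit: defer one import into the function
# that needs it, so it is resolved when the function runs, not at import time.
(pkg / "mod_b.py").write_text(
    "def helper():\n"
    "    from mod_a import work\n"    # deferred, function-scope import
    "    return 'ok'\n"
)
sys.modules.pop("mod_a", None)        # make sure the failed partial modules are gone
sys.modules.pop("mod_b", None)
importlib.invalidate_caches()

import mod_a
print("fixed:", mod_a.work())         # -> fixed: ok

The cost of the deferred imports is a small per-call lookup and imports that no longer appear in the module header, which is usually acceptable as a stopgap while module boundaries are being untangled.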

Diff for: modules/api/ray.py

+1

@@ -8,6 +8,7 @@
 from modules import initialize


+
 initialize.imports()

 initialize.check_versions()

Diff for: modules/processing.py

+8 -1

@@ -19,7 +19,6 @@
 from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng
 from modules.rng import slerp # noqa: F401
 from modules.sd_hijack import model_hijack
-from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
 import modules.paths as paths
@@ -89,6 +88,7 @@ def create_binary_mask(image):
     return image

 def txt2img_image_conditioning(sd_model, x, width, height):
+    from modules.sd_samplers_common import images_tensor_to_samples, approximation_indexes
     if sd_model.model.conditioning_key in {'hybrid', 'concat'}: # Inpainting models

         # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
@@ -277,6 +277,7 @@ def txt2img_image_conditioning(self, x, width=None, height=None):
         return txt2img_image_conditioning(self.sd_model, x, width or self.width, height or self.height)

     def depth2img_image_conditioning(self, source_image):
+        from modules.sd_samplers_common import images_tensor_to_samples, approximation_indexes
         # Use the AddMiDaS helper to Format our source image to suit the MiDaS model
         transformer = AddMiDaS(model_type="dpt_hybrid")
         transformed = transformer({"jpg": rearrange(source_image[0], "c h w -> h w c")})
@@ -296,6 +297,7 @@ def depth2img_image_conditioning(self, source_image):
         return conditioning

     def edit_image_conditioning(self, source_image):
+        from modules.sd_samplers_common import images_tensor_to_samples, approximation_indexes
         conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))

         return conditioning_image
@@ -588,6 +590,7 @@ class DecodedSamples(list):


 def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
+    from modules.sd_samplers_common import decode_first_stage
     samples = DecodedSamples()

     for i in range(batch.shape[0]):
@@ -1156,6 +1159,7 @@ def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subs
         return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)

     def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):
+
         if shared.state.interrupted:
             return samples

@@ -1189,6 +1193,7 @@ def save_intermediate(image, index):
             # Avoid making the inpainting conditioning unless necessary as
             # this does need some extra compute to decode / encode the image again.
             if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
+                from modules.sd_samplers_common import decode_first_stage , approximation_indexes
                 image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
             else:
                 image_conditioning = self.txt2img_image_conditioning(samples)
@@ -1212,6 +1217,7 @@ def save_intermediate(image, index):
             decoded_samples = decoded_samples.to(shared.device, dtype=devices.dtype_vae)

             if opts.sd_vae_encode_method != 'Full':
+                from modules.sd_samplers_common import images_tensor_to_samples
                 self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
             samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method))

@@ -1387,6 +1393,7 @@ def mask_blur(self, value):
         self.mask_blur_y = value

     def init(self, all_prompts, all_seeds, all_subseeds):
+        from modules.sd_samplers_common import images_tensor_to_samples, approximation_indexes
         self.image_cfg_scale: float = self.image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None

         self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)

Diff for: modules/sd_samplers_common.py

+3 -3

@@ -3,7 +3,7 @@
 import numpy as np
 import torch
 from PIL import Image
-from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared, sd_models
+from modules import devices, images, sd_vae_approx, sd_vae_taesd, shared, sd_models
 from modules.shared import opts, state
 import k_diffusion.sampling

@@ -122,8 +122,8 @@ def store_latent(decoded):

 def is_sampler_using_eta_noise_seed_delta(p):
     """returns whether sampler from config will use eta noise seed delta for image creation"""
-
-    sampler_config = sd_samplers.find_sampler_config(p.sampler_name)
+    from modules.sd_samplers import find_sampler_config
+    sampler_config = find_sampler_config(p.sampler_name)

     eta = p.eta

Diff for: modules/sd_samplers_kdiffusion.py

+5 -4

@@ -1,7 +1,8 @@
 import torch
 import inspect
 import k_diffusion.sampling
-from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser
+from modules.sd_samplers_common import SamplerData, Sampler, setup_img2img_steps
+from modules import sd_samplers_extra, sd_samplers_cfg_denoiser
 from modules.sd_samplers_cfg_denoiser import CFGDenoiser # noqa: F401
 from modules.script_callbacks import ExtraNoiseParams, extra_noise_callback

@@ -40,7 +41,7 @@


 samplers_data_k_diffusion = [
-    sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
+    SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
     for label, funcname, aliases, options in samplers_k_diffusion
     if callable(funcname) or hasattr(k_diffusion.sampling, funcname)
 ]
@@ -76,7 +77,7 @@ def inner_model(self):
         return self.model_wrap


-class KDiffusionSampler(sd_samplers_common.Sampler):
+class KDiffusionSampler(Sampler):
     def __init__(self, funcname, sd_model, options=None):
         super().__init__(funcname)

@@ -139,7 +140,7 @@ def get_sigmas(self, p, steps):
         return sigmas

     def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
-        steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
+        steps, t_enc = setup_img2img_steps(p, steps)

         sigmas = self.get_sigmas(p, steps)
         sigma_sched = sigmas[steps - t_enc - 1:]
