
Commit 08c62d7

Merge branch 'development' into dev/diffusers
2 parents: 8db7054 + 47ddda1

2 files changed (+9, -20 lines)

ldm/invoke/args.py
Lines changed: 0 additions & 2 deletions

@@ -250,8 +250,6 @@ def dream_prompt_str(self,**kwargs):
             switches.append('--seamless')
         if a['hires_fix']:
             switches.append('--hires_fix')
-        if a['strength'] and a['strength']>0:
-            switches.append(f'-f {a["strength"]}')
 
         # img2img generations have parameters relevant only to them and have special handling
         if a['init_img'] and len(a['init_img'])>0:
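
The removed lines stopped prompt strings from unconditionally carrying a -f strength switch. A minimal standalone sketch of the switch-assembly pattern in dream_prompt_str (the options dict a and its values are illustrative stand-ins, not the real Args state):

    # Illustrative stand-in for the options consumed by dream_prompt_str
    a = {'seamless': True, 'hires_fix': True, 'strength': 0.75, 'init_img': ''}

    switches = []
    if a['seamless']:
        switches.append('--seamless')
    if a['hires_fix']:
        switches.append('--hires_fix')
    # This commit drops the unconditional strength switch that used to follow:
    #     if a['strength'] and a['strength'] > 0:
    #         switches.append(f'-f {a["strength"]}')
    # img2img generations have parameters relevant only to them and get
    # special handling in the init_img branch below.
    if a['init_img'] and len(a['init_img']) > 0:
        pass  # img2img-only switches assembled here

    print(' '.join(switches))  # --seamless --hires_fix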

ldm/invoke/generator/txt2img2img.py
Lines changed: 9 additions & 18 deletions

@@ -6,8 +6,6 @@
 
 import torch
 from PIL import Image
-from ldm.invoke.devices import choose_autocast
-from ldm.invoke.image_util import InitImageResizer
 
 from ldm.invoke.generator.base import Generator
 from ldm.invoke.generator.omnibus import Omnibus
@@ -47,13 +45,16 @@ def make_image(x_T):
                 ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
             )
 
+            #x = self.get_noise(init_width, init_height)
+            x = x_T
+
             if self.free_gpu_mem and self.model.model.device != self.model.device:
                 self.model.model.to(self.model.device)
 
             samples, _ = sampler.sample(
                 batch_size = 1,
                 S = steps,
-                x_T = x_T,
+                x_T = x,
                 conditioning = c,
                 shape = shape,
                 verbose = False,
@@ -69,21 +70,11 @@ def make_image(x_T):
             )
 
             # resizing
-
-            image = self.sample_to_image(samples)
-            image = InitImageResizer(image).resize(width, height)
-
-            image = np.array(image).astype(np.float32) / 255.0
-            image = image[None].transpose(0, 3, 1, 2)
-            image = torch.from_numpy(image)
-            image = 2.0 * image - 1.0
-            image = image.to(self.model.device)
-
-            scope = choose_autocast(self.precision)
-            with scope(self.model.device.type):
-                samples = self.model.get_first_stage_encoding(
-                    self.model.encode_first_stage(image)
-                ) # move back to latent space
+            samples = torch.nn.functional.interpolate(
+                samples,
+                size=(height // self.downsampling_factor, width // self.downsampling_factor),
+                mode="bilinear"
+            )
 
             t_enc = int(strength * steps)
             ddim_sampler = DDIMSampler(self.model, device=self.model.device)
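
The second file swaps the pixel-space resize for a latent-space one: instead of decoding samples to a PIL image, resizing it, and re-encoding through the first-stage model (which needed the now-removed choose_autocast and InitImageResizer imports), the latents are interpolated directly. A minimal standalone sketch of the technique; the 4-channel latent shape and downsampling factor of 8 are assumed typical Stable Diffusion values, not taken from the commit:

    import torch

    downsampling_factor = 8                 # assumed VAE downsampling factor
    width, height = 768, 512                # target image size in pixels

    samples = torch.randn(1, 4, 64, 64)     # stand-in for the first-pass latents

    # Upscale directly in latent space, avoiding the old
    # decode -> PIL resize -> re-encode round trip through the VAE.
    samples = torch.nn.functional.interpolate(
        samples,
        size=(height // downsampling_factor, width // downsampling_factor),
        mode="bilinear",
    )
    print(samples.shape)  # torch.Size([1, 4, 64, 96])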
