Skip to content

Commit d121406

Browse files
authored
Merge branch 'development' into dev/diffusers
2 parents 6b586b7 + b57c81a commit d121406

File tree

11 files changed

+107
-26
lines changed

11 files changed

+107
-26
lines changed

docs/features/OUTPAINTING.md

+15
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,21 @@ The new image is larger than the original (576x704) because 64 pixels were added
9292
to the top and right sides. You will need enough VRAM to process an image of
9393
this size.
9494

95+
#### Outcropping non-InvokeAI images
96+
97+
You can outcrop an arbitrary image that was not generated by InvokeAI,
98+
but your results will vary. The `inpainting-1.5` model is highly
99+
recommended; if that is not feasible, you may be able to improve the
100+
output by conditioning the outcropping with a text prompt that
101+
describes the scene using the `--new_prompt` argument:
102+
103+
```bash
104+
invoke> !fix images/vacation.png --outcrop top 128 --new_prompt "family vacation"
105+
```
106+
107+
You may also provide a different seed for the outcrop operation by passing
108+
`-S<seed>`. A seed of "0" will generate a new random seed.
109+
95110
A number of caveats:
96111

97112
1. Although you can specify any pixel values, they will be rounded up to the

ldm/generate.py

+19-8
Original file line numberDiff line numberDiff line change
@@ -566,17 +566,22 @@ def apply_postprocessor(
566566
):
567567
# retrieve the seed from the image;
568568
seed = None
569-
image_metadata = None
570569
prompt = None
571570

572571
args = metadata_from_png(image_path)
573-
seed = args.seed
574-
prompt = args.prompt
575-
print(f'>> retrieved seed {seed} and prompt "{prompt}" from {image_path}')
572+
if opt.seed is not None:
573+
seed = opt.seed
574+
elif args.seed >= 0:
575+
seed = args.seed
576+
else:
577+
seed = random.randrange(0, np.iinfo(np.uint32).max)
578+
579+
if opt.prompt is not None:
580+
prompt = opt.prompt
581+
else:
582+
prompt = args.prompt
576583

577-
if not seed:
578-
print('* Could not recover seed for image. Replacing with 42. This will not affect image quality')
579-
seed = 42
584+
print(f'>> using seed {seed} and prompt "{prompt}" for {image_path}')
580585

581586
# try to reuse the same filename prefix as the original file.
582587
# we take everything up to the first period
@@ -623,6 +628,10 @@ def apply_postprocessor(
623628
extend_instructions[direction]=int(pixels)
624629
except ValueError:
625630
print(f'** invalid extension instruction. Use <directions> <pixels>..., as in "top 64 left 128 right 64 bottom 64"')
631+
632+
opt.seed = seed
633+
opt.prompt = prompt
634+
626635
if len(extend_instructions)>0:
627636
restorer = Outcrop(image,self,)
628637
return restorer.process (
@@ -1085,7 +1094,9 @@ def _has_transparency(self, image):
10851094
return True
10861095
return False
10871096

1088-
def _check_for_erasure(self, image):
1097+
def _check_for_erasure(self, image:Image.Image)->bool:
1098+
if image.mode not in ('RGBA','RGB'):
1099+
return False
10891100
width, height = image.size
10901101
pixdata = image.load()
10911102
colored = 0

ldm/invoke/args.py

+5
Original file line numberDiff line numberDiff line change
@@ -866,6 +866,11 @@ def _create_dream_cmd_parser(self):
866866
default=32,
867867
help='When outpainting, the tile size to use for filling outpaint areas',
868868
)
869+
postprocessing_group.add_argument(
870+
'--new_prompt',
871+
type=str,
872+
help='Change the text prompt applied during postprocessing (default, use original generation prompt)',
873+
)
869874
postprocessing_group.add_argument(
870875
'-ft',
871876
'--facetool',

ldm/invoke/generator/base.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ def generate(self,prompt,init_image,width,height,sampler, iterations=1,seed=None
7272
**kwargs
7373
)
7474
results = []
75-
seed = seed if seed is not None else self.new_seed()
75+
seed = seed if seed is not None and seed > 0 else self.new_seed()
7676
first_seed = seed
7777
seed, initial_noise = self.generate_initial_noise(seed, width, height)
7878

ldm/invoke/model_cache.py

+6-3
Original file line numberDiff line numberDiff line change
@@ -112,10 +112,13 @@ def set_default_model(self,model_name:str):
112112
Set the default model. The change will not take
113113
effect until you call model_cache.commit()
114114
'''
115+
print(f'DEBUG: before set_default_model()\n{OmegaConf.to_yaml(self.config)}')
115116
assert model_name in self.models,f"unknown model '{model_name}'"
116-
for model in self.models:
117-
self.models[model].pop('default',None)
118-
self.models[model_name]['default'] = True
117+
config = self.config
118+
for model in config:
119+
config[model].pop('default',None)
120+
config[model_name]['default'] = True
121+
print(f'DEBUG: after set_default_model():\n{OmegaConf.to_yaml(self.config)}')
119122

120123
def list_models(self) -> dict:
121124
'''

ldm/invoke/prompt_parser.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -636,7 +636,7 @@ def split_weighted_subprompts(text, skip_normalize=False)->list:
636636
weight_sum = sum(map(lambda x: x[1], parsed_prompts))
637637
if weight_sum == 0:
638638
print(
639-
"Warning: Subprompt weights add up to zero. Discarding and using even weights instead.")
639+
"* Warning: Subprompt weights add up to zero. Discarding and using even weights instead.")
640640
equal_weight = 1 / max(len(parsed_prompts), 1)
641641
return [(x[0], equal_weight) for x in parsed_prompts]
642642
return [(x[0], x[1] / weight_sum) for x in parsed_prompts]

ldm/invoke/readline.py

+2
Original file line numberDiff line numberDiff line change
@@ -284,6 +284,7 @@ def _path_completions(self, text, state, extensions, shortcut_ok=True):
284284
switch,partial_path = match.groups()
285285
partial_path = partial_path.lstrip()
286286

287+
287288
matches = list()
288289
path = os.path.expanduser(partial_path)
289290

@@ -321,6 +322,7 @@ def _path_completions(self, text, state, extensions, shortcut_ok=True):
321322
matches.append(
322323
switch+os.path.join(os.path.dirname(full_path), node)
323324
)
325+
324326
return matches
325327

326328
class DummyCompleter(Completer):

ldm/invoke/restoration/outcrop.py

+4-3
Original file line numberDiff line numberDiff line change
@@ -28,11 +28,12 @@ def process (
2828
self.generate._set_sampler()
2929

3030
def wrapped_callback(img,seed,**kwargs):
31-
image_callback(img,orig_opt.seed,use_prefix=prefix,**kwargs)
31+
preferred_seed = orig_opt.seed if orig_opt.seed> 0 else seed
32+
image_callback(img,preferred_seed,use_prefix=prefix,**kwargs)
3233

3334
result= self.generate.prompt2image(
34-
orig_opt.prompt,
35-
seed = orig_opt.seed, # uncomment to make it deterministic
35+
opt.prompt,
36+
seed = opt.seed if opt.seed else orig_opt.seed,
3637
sampler = self.generate.sampler,
3738
steps = opt.steps,
3839
cfg_scale = opt.cfg_scale,

ldm/modules/attention.py

-1
Original file line numberDiff line numberDiff line change
@@ -282,7 +282,6 @@ def einsum_op_cuda(self, q, k, v):
282282

283283
def get_attention_mem_efficient(self, q, k, v):
284284
if q.device.type == 'cuda':
285-
torch.cuda.empty_cache()
286285
#print("in get_attention_mem_efficient with q shape", q.shape, ", k shape", k.shape, ", free memory is", get_mem_free_total(q.device))
287286
return self.einsum_op_cuda(q, k, v)
288287

requirements.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -38,4 +38,4 @@ git+https://github.com/openai/CLIP.git@main#egg=clip
3838
git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
3939
git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
4040
git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
41-
-e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
41+
git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg

scripts/invoke.py

+53-8
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
def main():
2929
"""Initialize command-line parsers and the diffusion model"""
3030
global infile
31+
print('* Initializing, be patient...')
3132

3233
opt = Args()
3334
args = opt.parse_args()
@@ -45,7 +46,6 @@ def main():
4546
print('--max_loaded_models must be >= 1; using 1')
4647
args.max_loaded_models = 1
4748

48-
print('* Initializing, be patient...')
4949
from ldm.generate import Generate
5050

5151
# these two lines prevent a horrible warning message from appearing
@@ -89,7 +89,12 @@ def main():
8989
safety_checker=opt.safety_checker,
9090
max_loaded_models=opt.max_loaded_models,
9191
)
92-
except (FileNotFoundError, IOError, KeyError) as e:
92+
except FileNotFoundError:
93+
print('** You appear to be missing configs/models.yaml')
94+
print('** You can either exit this script and run scripts/preload_models.py, or fix the problem now.')
95+
emergency_model_create(opt)
96+
sys.exit(-1)
97+
except (IOError, KeyError) as e:
9398
print(f'{e}. Aborting.')
9499
sys.exit(-1)
95100

@@ -276,7 +281,7 @@ def image_writer(image, seed, upscaled=False, first_seed=None, use_prefix=None):
276281
filename = f'{prefix}.{use_prefix}.{seed}.png'
277282
tm = opt.text_mask[0]
278283
th = opt.text_mask[1] if len(opt.text_mask)>1 else 0.5
279-
formatted_dream_prompt = f'!mask {opt.prompt} -tm {tm} {th}'
284+
formatted_dream_prompt = f'!mask {opt.input_file_path} -tm {tm} {th}'
280285
path = file_writer.save_image_and_prompt_to_png(
281286
image = image,
282287
dream_prompt = formatted_dream_prompt,
@@ -316,7 +321,7 @@ def image_writer(image, seed, upscaled=False, first_seed=None, use_prefix=None):
316321
tool = re.match('postprocess:(\w+)',opt.last_operation).groups()[0]
317322
add_postprocessing_to_metadata(
318323
opt,
319-
opt.prompt,
324+
opt.input_file_path,
320325
filename,
321326
tool,
322327
formatted_dream_prompt,
@@ -481,6 +486,7 @@ def do_command(command:str, gen, opt:Args, completer) -> tuple:
481486
command = '-h'
482487
return command, operation
483488

489+
484490
def add_weights_to_config(model_path:str, gen, opt, completer):
485491
print(f'>> Model import in process. Please enter the values needed to configure this model:')
486492
print()
@@ -577,7 +583,7 @@ def write_config_file(conf_path, gen, model_name, new_config, clobber=False, mak
577583

578584
try:
579585
print('>> Verifying that new model loads...')
580-
yaml_str = gen.model_cache.add_model(model_name, new_config, clobber)
586+
gen.model_cache.add_model(model_name, new_config, clobber)
581587
assert gen.set_model(model_name) is not None, 'model failed to load'
582588
except AssertionError as e:
583589
print(f'** aborting **')
@@ -613,10 +619,16 @@ def do_textmask(gen, opt, callback):
613619
)
614620

615621
def do_postprocess (gen, opt, callback):
616-
file_path = opt.prompt # treat the prompt as the file pathname
622+
file_path = opt.prompt # treat the prompt as the file pathname
623+
if opt.new_prompt is not None:
624+
opt.prompt = opt.new_prompt
625+
else:
626+
opt.prompt = None
627+
617628
if os.path.dirname(file_path) == '': #basename given
618629
file_path = os.path.join(opt.outdir,file_path)
619630

631+
opt.input_file_path = file_path
620632
tool=None
621633
if opt.facetool_strength > 0:
622634
tool = opt.facetool
@@ -655,7 +667,10 @@ def do_postprocess (gen, opt, callback):
655667
def add_postprocessing_to_metadata(opt,original_file,new_file,tool,command):
656668
original_file = original_file if os.path.exists(original_file) else os.path.join(opt.outdir,original_file)
657669
new_file = new_file if os.path.exists(new_file) else os.path.join(opt.outdir,new_file)
658-
meta = retrieve_metadata(original_file)['sd-metadata']
670+
try:
671+
meta = retrieve_metadata(original_file)['sd-metadata']
672+
except AttributeError:
673+
meta = retrieve_metadata(new_file)['sd-metadata']
659674
if 'image' not in meta:
660675
meta = metadata_dumps(opt,seeds=[opt.seed])['image']
661676
meta['image'] = {}
@@ -703,7 +718,7 @@ def prepare_image_metadata(
703718
elif len(prior_variations) > 0:
704719
formatted_dream_prompt = opt.dream_prompt_str(seed=first_seed)
705720
elif operation == 'postprocess':
706-
formatted_dream_prompt = '!fix '+opt.dream_prompt_str(seed=seed)
721+
formatted_dream_prompt = '!fix '+opt.dream_prompt_str(seed=seed,prompt=opt.input_file_path)
707722
else:
708723
formatted_dream_prompt = opt.dream_prompt_str(seed=seed)
709724
return filename,formatted_dream_prompt
@@ -877,6 +892,36 @@ def write_commands(opt, file_path:str, outfilepath:str):
877892
f.write('\n'.join(commands))
878893
print(f'>> File {outfilepath} with commands created')
879894

895+
def emergency_model_create(opt:Args):
896+
completer = get_completer(opt)
897+
completer.complete_extensions(('.yaml','.yml','.ckpt','.vae.pt'))
898+
completer.set_default_dir('.')
899+
valid_path = False
900+
while not valid_path:
901+
weights_file = input('Enter the path to a downloaded models file, or ^C to exit: ')
902+
valid_path = os.path.exists(weights_file)
903+
dir,basename = os.path.split(weights_file)
904+
905+
valid_name = False
906+
while not valid_name:
907+
name = input('Enter a short name for this model (no spaces): ')
908+
name = 'unnamed model' if len(name)==0 else name
909+
valid_name = ' ' not in name
910+
911+
description = input('Enter a description for this model: ')
912+
description = 'no description' if len(description)==0 else description
913+
914+
with open(opt.conf, 'w', encoding='utf-8') as f:
915+
f.write(f'{name}:\n')
916+
f.write(f' description: {description}\n')
917+
f.write(f' weights: {weights_file}\n')
918+
f.write(f' config: ./configs/stable-diffusion/v1-inference.yaml\n')
919+
f.write(f' width: 512\n')
920+
f.write(f' height: 512\n')
921+
f.write(f' default: true\n')
922+
print(f'Config file {opt.conf} is created. This script will now exit.')
923+
print(f'After restarting you may examine the entry with !models and edit it with !edit.')
924+
880925
######################################
881926

882927
if __name__ == '__main__':

0 commit comments

Comments
 (0)