import gradio as gr
+import torch
+import modules.devices as devices

from modules import scripts
from ldm_patched.modules import model_management
@@ -17,23 +19,54 @@ def title(self):
    def show(self, is_img2img):
        return scripts.AlwaysVisible

+    """
+    The following two functions are pulled directly from
+    pkuliyi2015/multidiffusion-upscaler-for-automatic1111
+    """
+    def get_rcmd_enc_tsize(self):
+        if torch.cuda.is_available() and devices.device not in ['cpu', devices.cpu]:
+            total_memory = torch.cuda.get_device_properties(devices.device).total_memory // 2 ** 20
+            if total_memory > 16 * 1000: ENCODER_TILE_SIZE = 3072
+            elif total_memory > 12 * 1000: ENCODER_TILE_SIZE = 2048
+            elif total_memory > 8 * 1000: ENCODER_TILE_SIZE = 1536
+            else: ENCODER_TILE_SIZE = 960
+        else: ENCODER_TILE_SIZE = 512
+        return ENCODER_TILE_SIZE
+
+    def get_rcmd_dec_tsize(self):
+        if torch.cuda.is_available() and devices.device not in ['cpu', devices.cpu]:
+            total_memory = torch.cuda.get_device_properties(devices.device).total_memory // 2 ** 20
+            if total_memory > 30 * 1000: DECODER_TILE_SIZE = 256
+            elif total_memory > 16 * 1000: DECODER_TILE_SIZE = 192
+            elif total_memory > 12 * 1000: DECODER_TILE_SIZE = 128
+            elif total_memory > 8 * 1000: DECODER_TILE_SIZE = 96
+            else: DECODER_TILE_SIZE = 64
+        else: DECODER_TILE_SIZE = 64
+        return DECODER_TILE_SIZE
+
    def ui(self, *args, **kwargs):
        with gr.Accordion(open=False, label=self.title()):
            unet_enabled = gr.Checkbox(label='Enabled for UNet (always maximize offload)', value=False)
            vae_enabled = gr.Checkbox(label='Enabled for VAE (always tiled)', value=False)
-        return unet_enabled, vae_enabled
+            encoder_tile_size = gr.Slider(label='Encoder Tile Size', minimum=256, maximum=4096, step=16, value=self.get_rcmd_enc_tsize())
+            decoder_tile_size = gr.Slider(label='Decoder Tile Size', minimum=48, maximum=512, step=16, value=self.get_rcmd_dec_tsize())
+        return unet_enabled, vae_enabled, encoder_tile_size, decoder_tile_size

    def process(self, p, *script_args, **kwargs):
-        unet_enabled, vae_enabled = script_args
+        unet_enabled, vae_enabled, encoder_tile_size, decoder_tile_size = script_args

        if unet_enabled:
            print('NeverOOM Enabled for UNet (always maximize offload)')

        if vae_enabled:
            print('NeverOOM Enabled for VAE (always tiled)')
+            print('With tile sizes')
+            print(f'Encode:\tx:{encoder_tile_size}\ty:{encoder_tile_size}')
+            print(f'Decode:\tx:{decoder_tile_size}\ty:{decoder_tile_size}')

        model_management.VAE_ALWAYS_TILED = vae_enabled
-
+        model_management.VAE_ENCODE_TILE_SIZE_X = model_management.VAE_ENCODE_TILE_SIZE_Y = encoder_tile_size
+        model_management.VAE_DECODE_TILE_SIZE_X = model_management.VAE_DECODE_TILE_SIZE_Y = decoder_tile_size
        if self.previous_unet_enabled != unet_enabled:
            model_management.unload_all_models()
            if unet_enabled:
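The two new sliders are returned from ui() and arrive positionally as *script_args in process(), which is why the unpacking line gains two extra names. For a quick sanity check of what the VRAM heuristic above would recommend on a given card, here is a minimal standalone sketch; it is not part of the patch, and recommend_tile_sizes is a hypothetical helper that simply mirrors the thresholds from get_rcmd_enc_tsize / get_rcmd_dec_tsize:

import torch

def recommend_tile_sizes(total_memory_mib: int) -> tuple:
    # Mirrors the MiB thresholds used in the patch (1000-based cutoffs, not 1024).
    if total_memory_mib > 16 * 1000:
        enc = 3072
    elif total_memory_mib > 12 * 1000:
        enc = 2048
    elif total_memory_mib > 8 * 1000:
        enc = 1536
    else:
        enc = 960

    if total_memory_mib > 30 * 1000:
        dec = 256
    elif total_memory_mib > 16 * 1000:
        dec = 192
    elif total_memory_mib > 12 * 1000:
        dec = 128
    elif total_memory_mib > 8 * 1000:
        dec = 96
    else:
        dec = 64
    return enc, dec

if torch.cuda.is_available():
    # total_memory is reported in bytes; // 2 ** 20 converts it to MiB, as in the patch.
    total_mib = torch.cuda.get_device_properties(0).total_memory // 2 ** 20
    # e.g. a 12 GiB card reports ~12288 MiB, which falls in the >12000 bracket -> (2048, 128)
    print(total_mib, recommend_tile_sizes(total_mib))
else:
    # Without CUDA the patch falls back to 512 (encoder) and 64 (decoder).
    print(512, 64)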