Diffstat (limited to 'modules/processing.py')
-rw-r--r-- | modules/processing.py | 24
1 file changed, 13 insertions, 11 deletions
diff --git a/modules/processing.py b/modules/processing.py
index 542d1136..1e6745cc 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -10,6 +10,7 @@ from PIL import Image, ImageFilter, ImageOps
import random
import modules.sd_hijack
+from modules import devices
from modules.sd_hijack import model_hijack
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.shared import opts, cmd_opts, state
@@ -23,11 +24,6 @@ opt_C = 4
opt_f = 8
-def torch_gc():
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- torch.cuda.ipc_collect()
-
class StableDiffusionProcessing:
def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", prompt_style="None", seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None):
@@ -69,6 +65,7 @@ class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed, info):
self.images = images_list
self.prompt = p.prompt
+ self.negative_prompt = p.negative_prompt
self.seed = seed
self.info = info
self.width = p.width
@@ -80,6 +77,7 @@ class Processed:
def js(self):
obj = {
"prompt": self.prompt if type(self.prompt) != list else self.prompt[0],
+ "negative_prompt": self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0],
"seed": int(self.seed if type(self.seed) != list else self.seed[0]),
"width": self.width,
"height": self.height,
@@ -174,7 +172,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
"""this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""
assert p.prompt is not None
- torch_gc()
+ devices.torch_gc()
fix_seed(p)
@@ -195,7 +193,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if type(p.seed) == list:
all_seeds = p.seed
else:
- all_seeds = [int(p.seed + x) for x in range(len(all_prompts))]
+ all_seeds = [int(p.seed + (x if p.subseed_strength == 0 else 0)) for x in range(len(all_prompts))]
if type(p.subseed) == list:
all_subseeds = p.subseed
@@ -275,12 +273,16 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
x_sample = x_sample.astype(np.uint8)
if p.restore_faces:
- torch_gc()
+ if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
+ images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
+
+ devices.torch_gc()
x_sample = modules.face_restoration.restore_faces(x_sample)
image = Image.fromarray(x_sample)
+
if p.overlay_images is not None and i < len(p.overlay_images):
overlay = p.overlay_images[i]
@@ -296,7 +298,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
image = image.convert('RGB')
if opts.samples_save and not p.do_not_save_samples:
- images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i))
+ images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
output_images.append(image)
@@ -312,9 +314,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
output_images.insert(0, grid)
if opts.grid_save:
- images.save_image(grid, p.outpath_grids, "grid", all_seeds[0], all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename)
+ images.save_image(grid, p.outpath_grids, "grid", all_seeds[0], all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p)
- torch_gc()
+ devices.torch_gc()
return Processed(p, output_images, all_seeds[0], infotext())
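The local torch_gc() removed above is replaced by devices.torch_gc() from the newly imported modules.devices. A minimal sketch of that helper, assuming it simply carries over the deleted implementation unchanged:

import torch

def torch_gc():
    # Release cached CUDA allocations between batches so memory is
    # returned to the GPU pool; this is a no-op on CPU-only installs.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()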
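The reworked seed list in the second process_images hunk only increments the seed per image when subseed_strength is zero; with variation strength enabled, every image in the job reuses the base seed, so differences presumably come from the subseeds instead. A small illustration of the two branches (example values are hypothetical):

p_seed, p_subseed_strength, n_images = 1000, 0, 4
print([int(p_seed + (x if p_subseed_strength == 0 else 0)) for x in range(n_images)])
# -> [1000, 1001, 1002, 1003]  (one seed per image, as before)

p_subseed_strength = 0.5
print([int(p_seed + (x if p_subseed_strength == 0 else 0)) for x in range(n_images)])
# -> [1000, 1000, 1000, 1000]  (same base seed for the whole batch)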