-rw-r--r--  modules/processing.py  4
-rw-r--r--  modules/shared.py      1
2 files changed, 5 insertions, 0 deletions
diff --git a/modules/processing.py b/modules/processing.py
index 1e17d77c..aaecb104 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -254,12 +254,16 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
             x_sample = x_sample.astype(np.uint8)
 
             if p.restore_faces:
+                if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
+                    images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
+
                 devices.torch_gc()
                 x_sample = modules.face_restoration.restore_faces(x_sample)
 
             image = Image.fromarray(x_sample)
+
             if p.overlay_images is not None and i < len(p.overlay_images):
                 overlay = p.overlay_images[i]
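
As a rough illustration of the behavior this hunk adds (a sketch, not the webui code itself): the pre-restoration copy is written only when saving is enabled overall, the job has not opted out of sample saving, and the new option is turned on; only then does face restoration run on the array. Opts, Job, save_image, and process_sample below are hypothetical stand-ins for opts, p, images.save_image, and the loop body in process_images.

    from dataclasses import dataclass

    @dataclass
    class Opts:
        save: bool = True
        save_images_before_face_restoration: bool = False

    @dataclass
    class Job:
        do_not_save_samples: bool = False
        restore_faces: bool = True

    def save_image(sample, note):
        # stand-in for images.save_image(); just reports what would be written
        print(f"saved ({note}): {sample}")

    def process_sample(sample, opts, p):
        if p.restore_faces:
            # keep an untouched copy before face restoration overwrites the pixels
            if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
                save_image(sample, "before face restoration")
            sample = sample + " [face restored]"  # stand-in for restore_faces()
        save_image(sample, "final")
        return sample

    process_sample("sample-0", Opts(save_images_before_face_restoration=True), Job())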
diff --git a/modules/shared.py b/modules/shared.py
index f4869c7c..afee573b 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -132,6 +132,7 @@ class Options:
     "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."),
     "face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
     "code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
+    "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
     "interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
     "interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
     "interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
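
For context, a pared-down sketch of how such an entry pairs a default value with a settings label (an assumption about the shape of the registry, not a copy of it; the real OptionInfo in modules/shared.py also carries a gradio component and its arguments, as the surrounding lines show). The new option defaults to False, so nothing changes until the user enables it.

    from dataclasses import dataclass

    @dataclass
    class OptionInfo:
        default: object
        label: str
        component: object = None
        component_args: object = None

    # a pared-down registry in the spirit of shared.py's option table
    data_labels = {
        "code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect"),
        "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
    }

    # options hold their defaults until changed in the settings UI
    current = {name: info.default for name, info in data_labels.items()}
    print(current["save_images_before_face_restoration"])  # -> False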