From 65fbefd0337f9deb913c6956a9cfe2155c9c2f5b Mon Sep 17 00:00:00 2001
From: xeonvs
Date: Wed, 7 Sep 2022 15:58:25 +0200
Subject: Added support for launching on Apple Silicon

---
 modules/esrgan_model.py | 7 +++++--
 modules/sd_hijack.py    | 5 ++++-
 modules/shared.py       | 9 ++++++---
 3 files changed, 15 insertions(+), 6 deletions(-)

(limited to 'modules')

diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 3dcef5a6..2ed1d273 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -14,8 +14,11 @@ import modules.images
 
 def load_model(filename):
     # this code is adapted from https://github.com/xinntao/ESRGAN
-
-    pretrained_net = torch.load(filename)
+    if torch.has_mps:
+        map_l = 'cpu'
+    else:
+        map_l = None
+    pretrained_net = torch.load(filename, map_location=map_l)
     crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32)
 
     if 'conv_first.weight' in pretrained_net:
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 2d26b5f7..9d0637bf 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -232,7 +232,10 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
         z = outputs.last_hidden_state
 
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
-        batch_multipliers = torch.asarray(np.array(batch_multipliers)).to(device)
+        if torch.has_mps:
+            batch_multipliers = torch.asarray(np.array(batch_multipliers).astype('float32')).to(device)
+        else:
+            batch_multipliers = torch.asarray(np.array(batch_multipliers)).to(device)
         original_mean = z.mean()
         z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
diff --git a/modules/shared.py b/modules/shared.py
index beb6f9bb..e529ec27 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -36,9 +36,12 @@ parser.add_argument("--opt-split-attention", action='store_true', help="enable o
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
 cmd_opts = parser.parse_args()
 
-cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
+if torch.has_cuda:
+    device = torch.device("cuda")
+elif torch.has_mps:
+    device = torch.device("mps")
+else:
+    device = torch.device("cpu")
 batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
 parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
 
--
cgit v1.2.1

From aaeeef82fa61f719cd5a2590423a99bd1229d5f1 Mon Sep 17 00:00:00 2001
From: xeonvs
Date: Wed, 7 Sep 2022 18:09:30 +0200
Subject: Miss device type for option --medvram

---
 modules/lowvram.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'modules')

diff --git a/modules/lowvram.py b/modules/lowvram.py
index 4b78deab..bd117491 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -2,9 +2,12 @@ import torch
 
 module_in_gpu = None
 cpu = torch.device("cpu")
-gpu = torch.device("cuda")
-device = gpu if torch.cuda.is_available() else cpu
-
+if torch.has_cuda:
+    device = gpu = torch.device("cuda")
+elif torch.has_mps:
+    device = gpu = torch.device("mps")
+else:
+    device = gpu = torch.device("cpu")
 
 def setup_for_low_vram(sd_model, use_medvram):
     parents = {}
--
cgit v1.2.1
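The two patches above follow one pattern: prefer CUDA, fall back to Apple's Metal (MPS) backend, and finally to the CPU, while forcing torch.load onto the CPU so checkpoints written on CUDA machines still deserialize on a Mac. A minimal sketch of that pattern, assuming torch >= 1.12 (the first release that ships the MPS backend); get_device and load_checkpoint are illustrative helpers, not functions from this repository:

    import torch

    def get_device() -> torch.device:
        # Prefer CUDA, then Apple Silicon's MPS backend, then plain CPU.
        if torch.cuda.is_available():
            return torch.device("cuda")
        if torch.backends.mps.is_available():
            return torch.device("mps")
        return torch.device("cpu")

    def load_checkpoint(filename: str):
        # Checkpoints saved on a CUDA machine cannot be restored onto their
        # original device here, so map them to the CPU and move tensors later.
        map_location = "cpu" if torch.backends.mps.is_available() else None
        return torch.load(filename, map_location=map_location)

The patches probe torch.has_cuda / torch.has_mps instead; the is_available() checks in the sketch are the more commonly documented way to test for a usable device at runtime.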
From ba1124b326280202cb583bbdc669fb5303bbd3e3 Mon Sep 17 00:00:00 2001
From: xeonvs
Date: Wed, 7 Sep 2022 20:40:32 +0200
Subject: directly convert list to tensor

---
 modules/sd_hijack.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

(limited to 'modules')

diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 9d0637bf..1084e248 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -232,10 +232,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
         z = outputs.last_hidden_state
 
         # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
-        if torch.has_mps:
-            batch_multipliers = torch.asarray(np.array(batch_multipliers).astype('float32')).to(device)
-        else:
-            batch_multipliers = torch.asarray(np.array(batch_multipliers)).to(device)
+        batch_multipliers = torch.asarray(batch_multipliers).to(device)
         original_mean = z.mean()
         z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
         new_mean = z.mean()
--
cgit v1.2.1

From 7045c846435cf5c9547729c60e85c386e78c90ed Mon Sep 17 00:00:00 2001
From: fuzzytent
Date: Wed, 7 Sep 2022 22:37:54 +0200
Subject: Also use alpha channel from img2img input image as mask

---
 modules/img2img.py | 6 ++++--
 modules/ui.py      | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)

(limited to 'modules')

diff --git a/modules/img2img.py b/modules/img2img.py
index 3129798d..1e734ac8 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -1,5 +1,5 @@
 import math
-from PIL import Image
+from PIL import Image, ImageOps, ImageChops
 
 from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
 from modules.shared import opts, state
@@ -16,7 +16,9 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
 
     if is_inpaint:
         image = init_img_with_mask['image']
-        mask = init_img_with_mask['mask']
+        alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
+        mask = ImageChops.lighter(alpha_mask, init_img_with_mask['mask'].convert('L')).convert('RGBA')
+        image = image.convert('RGB')
     else:
         image = init_img
         mask = None
diff --git a/modules/ui.py b/modules/ui.py
index f5564d0e..b1a8c776 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -323,7 +323,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
         with gr.Group():
             switch_mode = gr.Radio(label='Mode', elem_id="img2img_mode", choices=['Redraw whole image', 'Inpaint a part of image', 'Loopback', 'SD upscale'], value='Redraw whole image', type="index", show_label=False)
             init_img = gr.Image(label="Image for img2img", source="upload", interactive=True, type="pil")
-            init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False)
+            init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False, image_mode="RGBA")
             resize_mode = gr.Radio(label="Resize mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
 
             steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
--
cgit v1.2.1
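The img2img change above derives a second mask from the input image's alpha channel and merges it with the user-drawn sketch, which is why the Gradio component is switched to image_mode="RGBA". A standalone sketch of the mask construction, assuming Pillow is installed; build_inpaint_mask and its arguments are illustrative names rather than code from the repository:

    from PIL import Image, ImageChops, ImageOps

    def build_inpaint_mask(rgba_image: Image.Image, drawn_mask: Image.Image) -> Image.Image:
        # Any transparency in the RGBA input marks a region to inpaint: invert the
        # alpha band (transparent becomes bright) and binarize it to black/white.
        alpha_mask = ImageOps.invert(rgba_image.split()[-1]).point(lambda x: 255 if x > 0 else 0)
        # Union with the user-drawn mask: lighter() keeps the brighter of each
        # pixel pair, so a pixel is masked if either source marks it.
        return ImageChops.lighter(alpha_mask, drawn_mask.convert('L'))

The patch additionally converts the merged mask to RGBA and strips the alpha from the input via image.convert('RGB') before handing both to the processing pipeline.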
From 52e071da2a04acfd19cf8f6e69e006bd59937447 Mon Sep 17 00:00:00 2001
From: rewbs
Date: Thu, 8 Sep 2022 02:35:26 +0000
Subject: Add color correction to img2img loopback to avoid a progressive skew
 to magenta. Based on codedealer's PR to hlky's repo here:
 https://github.com/sd-webui/stable-diffusion-webui/pull/698/files.

---
 modules/img2img.py | 30 ++++++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)

(limited to 'modules')

diff --git a/modules/img2img.py b/modules/img2img.py
index 3129798d..2c74842d 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -1,4 +1,6 @@
 import math
+import cv2
+import numpy as np
 from PIL import Image
 
 from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
@@ -57,8 +59,19 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
 
         state.job_count = n_iter
 
+        do_color_correction = False
+        try:
+            from skimage import exposure
+            do_color_correction = True
+        except:
+            print("Install scikit-image to perform color correction on loopback")
+
+
         for i in range(n_iter):
 
+            if do_color_correction and i == 0:
+                correction_target = cv2.cvtColor(np.asarray(init_img.copy()), cv2.COLOR_RGB2LAB)
+
             p.n_iter = 1
             p.batch_size = 1
             p.do_not_save_grid = True
@@ -69,8 +82,21 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
             if initial_seed is None:
                 initial_seed = processed.seed
                 initial_info = processed.info
-
-            p.init_images = [processed.images[0]]
+
+            init_img = processed.images[0]
+
+            if do_color_correction and correction_target is not None:
+                print("Colour correcting input...")
+                init_img = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
+                    cv2.cvtColor(
+                        np.asarray(init_img),
+                        cv2.COLOR_RGB2LAB
+                    ),
+                    correction_target,
+                    channel_axis=2
+                ), cv2.COLOR_LAB2RGB).astype("uint8"))
+
+            p.init_images = [init_img]
             p.seed = processed.seed + 1
             p.denoising_strength = max(p.denoising_strength * 0.95, 0.1)
             history.append(processed.images[0])
--
cgit v1.2.1

From 1e7a36fd79612d924f0aca18061f1b3bd947b02a Mon Sep 17 00:00:00 2001
From: rewbs
Date: Thu, 8 Sep 2022 02:53:13 +0000
Subject: Remove debug print.

---
 modules/img2img.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'modules')

diff --git a/modules/img2img.py b/modules/img2img.py
index 2c74842d..52971785 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -86,7 +86,6 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
             init_img = processed.images[0]
 
             if do_color_correction and correction_target is not None:
-                print("Colour correcting input...")
                 init_img = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
                     cv2.cvtColor(
                         np.asarray(init_img),
--
cgit v1.2.1
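The loopback patches above capture the colour statistics of the very first input in LAB space, then histogram-match every iteration's output back to that reference before it is fed in again, which counteracts the gradual magenta drift over repeated img2img passes. A compact sketch of that step, assuming numpy, opencv-python, Pillow and scikit-image >= 0.19 (for the channel_axis argument) are installed; the helper names are illustrative, not the repository's API:

    import cv2
    import numpy as np
    from PIL import Image
    from skimage import exposure

    def setup_color_correction(image: Image.Image) -> np.ndarray:
        # Remember the LAB-space representation of the very first loopback input.
        return cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2LAB)

    def apply_color_correction(correction_target: np.ndarray, image: Image.Image) -> Image.Image:
        # Match the current image's per-channel histograms to the reference in LAB
        # space, then convert back to an RGB PIL image for the next iteration.
        matched = exposure.match_histograms(
            cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2LAB),
            correction_target,
            channel_axis=2,
        ).astype("uint8")
        return Image.fromarray(cv2.cvtColor(matched, cv2.COLOR_LAB2RGB))

In the patch this logic runs inline once per loop iteration on processed.images[0] before it is assigned back to p.init_images, with correction_target captured from the initial image on the first pass.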