From 8ae0ea9deaa5a09d1e0aa8b2f8e97c38d71cdbda Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 30 Oct 2022 23:48:33 +0000
Subject: Add callback to sd_samplers

---
 modules/sd_samplers.py | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 3670b57d..30cb5c4b 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -11,6 +11,7 @@ from modules import prompt_parser, devices, processing, images
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
+from modules.script_callbacks import CGFDenoiserParams, cfg_denoiser_callback
 
 
 SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
 
@@ -278,6 +279,8 @@ class CFGDenoiser(torch.nn.Module):
         image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
         sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
 
+        cfg_denoiser_callback(CGFDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps))
+
         if tensor.shape[1] == uncond.shape[1]:
             cond_in = torch.cat([tensor, uncond])
--
cgit v1.2.1

From 5b6bedf6f2ebacb7f1f5809af8e26a6a1af16e2a Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Wed, 2 Nov 2022 00:38:17 +0000
Subject: Update class name and assign back to vars

---
 modules/sd_samplers.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 30cb5c4b..ebc0d896 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -11,7 +11,7 @@ from modules import prompt_parser, devices, processing, images
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
-from modules.script_callbacks import CGFDenoiserParams, cfg_denoiser_callback
+from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
 
 
 SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
 
@@ -279,7 +279,11 @@ class CFGDenoiser(torch.nn.Module):
         image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
         sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
 
-        cfg_denoiser_callback(CGFDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps))
+        denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps)
+        cfg_denoiser_callback(denoiser_params)
+        x_in = denoiser_params.x
+        image_cond_in = denoiser_params.image_cond
+        sigma_in = denoiser_params.sigma
 
         if tensor.shape[1] == uncond.shape[1]:
             cond_in = torch.cat([tensor, uncond])
--
cgit v1.2.1

From 9c67408004ed132637d10321bf44565f82055fd2 Mon Sep 17 00:00:00 2001
From: timntorres <116157310+timntorres@users.noreply.github.com>
Date: Wed, 2 Nov 2022 02:18:21 -0700
Subject: Allow saving "before-highres-fix". (#4150)

* Save image/s before doing highres fix.
---
 modules/sd_samplers.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 44d4c189..d7fa89a0 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -93,9 +93,8 @@ def single_sample_to_image(sample):
     return Image.fromarray(x_sample)
 
 
-def sample_to_image(samples):
-    return single_sample_to_image(samples[0])
-
+def sample_to_image(samples, index=0):
+    return single_sample_to_image(samples[index])
 
 def samples_to_image_grid(samples):
     return images.image_grid([single_sample_to_image(sample) for sample in samples])
--
cgit v1.2.1

From eb5e82c7ddf5e72fa13b83bd1f12d3a07a4de1a4 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 2 Nov 2022 12:45:03 +0300
Subject: do not unnecessarily run VAE one more time when saving intermediate image with hires fix

---
 modules/sd_samplers.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index d7fa89a0..c7c414ef 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -96,6 +96,7 @@ def single_sample_to_image(sample):
 def sample_to_image(samples, index=0):
     return single_sample_to_image(samples[index])
 
+
 def samples_to_image_grid(samples):
     return images.image_grid([single_sample_to_image(sample) for sample in samples])
--
cgit v1.2.1

From 6008c0773ea575353f9b87da8a58454e20cc7857 Mon Sep 17 00:00:00 2001
From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com>
Date: Fri, 4 Nov 2022 23:03:05 +0000
Subject: Add support for new DPM-Solver++ samplers

---
 modules/sd_samplers.py | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index c7c414ef..7ece6556 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -29,6 +29,10 @@ samplers_k_diffusion = [
     ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
     ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
     ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
+    ('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
+    ('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
+    ('DPM-Solver++(2S) Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
+    ('DPM-Solver++(2M) Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
 ]
 
 samplers_data_k_diffusion = [
--
cgit v1.2.1

From f92dc505a013af9e385c7edbdf97539be62503d6 Mon Sep 17 00:00:00 2001
From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com>
Date: Fri, 4 Nov 2022 23:12:48 +0000
Subject: Fix name

---
 modules/sd_samplers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 7ece6556..b28a2e4c 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -31,7 +31,7 @@ samplers_k_diffusion = [
     ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
     ('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
     ('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
-    ('DPM-Solver++(2S) Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
+    ('DPM-Solver++(2S) a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
     ('DPM-Solver++(2M) Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
 ]
--
cgit v1.2.1

From 1b6c2fc749e12f12bbee4705e65f217d23fa9072 Mon Sep 17 00:00:00 2001
From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com>
Date: Fri, 4 Nov 2022 23:28:13 +0000
Subject: Reorder samplers

---
 modules/sd_samplers.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index b28a2e4c..1e88f7ee 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -24,13 +24,13 @@ samplers_k_diffusion = [
     ('Heun', 'sample_heun', ['k_heun'], {}),
     ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}),
     ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
+    ('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
+    ('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
     ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
     ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
     ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
     ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
     ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
-    ('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
-    ('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
     ('DPM-Solver++(2S) a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
     ('DPM-Solver++(2M) Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
 ]
--
cgit v1.2.1

From 159475e072f2ed3db8235aab9c3fa18640b93b80 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 5 Nov 2022 18:32:22 +0300
Subject: tweak names a bit for new samplers

---
 modules/sd_samplers.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 1e88f7ee..783992d2 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -24,15 +24,15 @@ samplers_k_diffusion = [
     ('Heun', 'sample_heun', ['k_heun'], {}),
     ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}),
     ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
-    ('DPM-Solver++(2S) a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
-    ('DPM-Solver++(2M)', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
+    ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
+    ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
     ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
     ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
     ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
     ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
     ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
-    ('DPM-Solver++(2S) a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
-    ('DPM-Solver++(2M) Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
+    ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
+    ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
 ]
 
 samplers_data_k_diffusion = [
--
cgit v1.2.1

From cdc8020d13c5eef099c609b0a911ccf3568afc0d Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 19 Nov 2022 12:01:51 +0300
Subject: change StableDiffusionProcessing to internally use sampler name instead of sampler index

---
 modules/sd_samplers.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 783992d2..4fe67854 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -46,16 +46,23 @@ all_samplers = [
     SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}),
     SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}),
 ]
+all_samplers_map = {x.name: x for x in all_samplers}
 
 samplers = []
 samplers_for_img2img = []
 
 
-def create_sampler_with_index(list_of_configs, index, model):
-    config = list_of_configs[index]
+def create_sampler(name, model):
+    if name is not None:
+        config = all_samplers_map.get(name, None)
+    else:
+        config = all_samplers[0]
+
+    assert config is not None, f'bad sampler name: {name}'
+
     sampler = config.constructor(model)
     sampler.config = config
-
+
     return sampler
--
cgit v1.2.1

From 0a01f5089127f1ab86625036526082f544344a10 Mon Sep 17 00:00:00 2001
From: uservar <63248296+uservar@users.noreply.github.com>
Date: Tue, 22 Nov 2022 14:24:50 +0000
Subject: Add DPM++ SDE sampler

---
 modules/sd_samplers.py | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 4fe67854..80e91d62 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -26,6 +26,7 @@ samplers_k_diffusion = [
     ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
     ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
     ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
+    ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}),
     ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
     ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
     ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
@@ -33,6 +34,7 @@ samplers_k_diffusion = [
     ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
     ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
     ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
+    ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}),
 ]
 
 samplers_data_k_diffusion = [
--
cgit v1.2.1

From c833d5bfaae05de41d8e795aba5b15822673ef04 Mon Sep 17 00:00:00 2001
From: Jay Smith
Date: Fri, 25 Nov 2022 20:12:23 -0600
Subject: fixes #3449 - VRAM leak when switching to/from inpainting model

---
 modules/sd_samplers.py | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 4fe67854..44112f99 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -1,4 +1,4 @@
-from collections import namedtuple
+from collections import namedtuple, deque
 import numpy as np
 from math import floor
 import torch
@@ -335,18 +335,28 @@ class CFGDenoiser(torch.nn.Module):
 
 class TorchHijack:
-    def __init__(self, kdiff_sampler):
-        self.kdiff_sampler = kdiff_sampler
+    def __init__(self, sampler_noises):
+        # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
+        # implementation.
+        self.sampler_noises = deque(sampler_noises)
 
     def __getattr__(self, item):
         if item == 'randn_like':
-            return self.kdiff_sampler.randn_like
+            return self.randn_like
 
         if hasattr(torch, item):
            return getattr(torch, item)
 
         raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
 
+    def randn_like(self, x):
+        if self.sampler_noises:
+            noise = self.sampler_noises.popleft()
+            if noise.shape == x.shape:
+                return noise
+
+        return torch.randn_like(x)
+
 
 class KDiffusionSampler:
     def __init__(self, funcname, sd_model):
@@ -356,7 +366,6 @@ class KDiffusionSampler:
         self.extra_params = sampler_extra_params.get(funcname, [])
         self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
         self.sampler_noises = None
-        self.sampler_noise_index = 0
         self.stop_at = None
         self.eta = None
         self.default_eta = 1.0
@@ -389,26 +398,14 @@ class KDiffusionSampler:
     def number_of_needed_noises(self, p):
         return p.steps
 
-    def randn_like(self, x):
-        noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None
-
-        if noise is not None and x.shape == noise.shape:
-            res = noise
-        else:
-            res = torch.randn_like(x)
-
-        self.sampler_noise_index += 1
-        return res
-
     def initialize(self, p):
         self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
         self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
         self.model_wrap.step = 0
-        self.sampler_noise_index = 0
         self.eta = p.eta or opts.eta_ancestral
 
         if self.sampler_noises is not None:
-            k_diffusion.sampling.torch = TorchHijack(self)
+            k_diffusion.sampling.torch = TorchHijack(self.sampler_noises)
 
         extra_params_kwargs = {}
         for param_name in self.extra_params:
--
cgit v1.2.1

From ce6911158b5b2f9cf79b405a1f368f875492044d Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 26 Nov 2022 16:10:46 +0300
Subject: Add support for Stable Diffusion 2.0

---
 modules/sd_samplers.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 4fe67854..4edd8c60 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -127,7 +127,8 @@ class InterruptedException(BaseException):
 class VanillaStableDiffusionSampler:
     def __init__(self, constructor, sd_model):
         self.sampler = constructor(sd_model)
-        self.orig_p_sample_ddim = self.sampler.p_sample_ddim if hasattr(self.sampler, 'p_sample_ddim') else self.sampler.p_sample_plms
+        self.is_plms = hasattr(self.sampler, 'p_sample_plms')
+        self.orig_p_sample_ddim = self.sampler.p_sample_plms if self.is_plms else self.sampler.p_sample_ddim
         self.mask = None
         self.nmask = None
         self.init_latent = None
@@ -218,7 +219,6 @@ class VanillaStableDiffusionSampler:
         self.mask = p.mask if hasattr(p, 'mask') else None
         self.nmask = p.nmask if hasattr(p, 'nmask') else None
 
-
     def adjust_steps_if_invalid(self, p, num_steps):
         if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'):
             valid_step = 999 / (1000 // num_steps)
@@ -227,7 +227,6 @@ class VanillaStableDiffusionSampler:
 
         return num_steps
 
-
     def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
         steps, t_enc = setup_img2img_steps(p, steps)
         steps = self.adjust_steps_if_invalid(p, steps)
@@ -260,9 +259,10 @@ class VanillaStableDiffusionSampler:
         steps = self.adjust_steps_if_invalid(p, steps or p.steps)
 
         # Wrap the conditioning models with additional image conditioning for inpainting model
+        # dummy_for_plms is needed because PLMS code checks the first item in the dict to have the right shape
         if image_conditioning is not None:
-            conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
-            unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
+            conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]}
+            unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]}
 
         samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
 
@@ -350,7 +350,9 @@ class TorchHijack:
 
 class KDiffusionSampler:
     def __init__(self, funcname, sd_model):
-        self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization)
+        denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
+
+        self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
         self.funcname = funcname
         self.func = getattr(k_diffusion.sampling, self.funcname)
         self.extra_params = sampler_extra_params.get(funcname, [])
--
cgit v1.2.1

From 10923f9b3a10a9af20429e51242614e259fbd434 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 27 Nov 2022 13:43:10 +0300
Subject: calculate dictionary for sampler names only once

---
 modules/sd_samplers.py | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 43ce34eb..6f8ccf1d 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -52,6 +52,7 @@ all_samplers_map = {x.name: x for x in all_samplers}
 
 samplers = []
 samplers_for_img2img = []
+samplers_map = {}
 
 
 def create_sampler(name, model):
@@ -77,6 +78,12 @@ def set_samplers():
     samplers = [x for x in all_samplers if x.name not in hidden]
     samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
 
+    samplers_map.clear()
+    for sampler in all_samplers:
+        samplers_map[sampler.name.lower()] = sampler.name
+        for alias in sampler.aliases:
+            samplers_map[alias.lower()] = sampler.name
+
 
 set_samplers()
--
cgit v1.2.1

From 506d529d19f135f57e142371271f84d4971b456f Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 27 Nov 2022 16:28:32 +0300
Subject: rework #5012 to also work for pictures dragged into the prompt and also add Clip skip + ENSD to parameters

---
 modules/sd_samplers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 2ca17d8b..5fefb227 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -18,7 +18,7 @@ from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
 SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
 
 samplers_k_diffusion = [
-    ('Euler a', 'sample_euler_ancestral', ['k_euler_a'], {}),
+    ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}),
     ('Euler', 'sample_euler', ['k_euler'], {}),
     ('LMS', 'sample_lms', ['k_lms'], {}),
     ('Heun', 'sample_heun', ['k_heun'], {}),
--
cgit v1.2.1

From 21effd629d0fdfdbbff2b20a9f4a3767e7e8bd33 Mon Sep 17 00:00:00 2001
From: brkirch
Date: Mon, 28 Nov 2022 21:24:06 -0500
Subject: Add workaround for using MPS with torchsde

---
 modules/sd_samplers.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 5fefb227..8b11f569 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -6,6 +6,7 @@ import tqdm
 from PIL import Image
 import inspect
 import k_diffusion.sampling
+import torchsde._brownian.brownian_interval
 import ldm.models.diffusion.ddim
 import ldm.models.diffusion.plms
 from modules import prompt_parser, devices, processing, images
@@ -367,6 +368,19 @@ class TorchHijack:
         return torch.randn_like(x)
 
 
+# MPS fix for randn in torchsde
+def torchsde_randn(size, dtype, device, seed):
+    if device.type == 'mps':
+        generator = torch.Generator(devices.cpu).manual_seed(int(seed))
+        return torch.randn(size, dtype=dtype, device=devices.cpu, generator=generator).to(device)
+    else:
+        generator = torch.Generator(device).manual_seed(int(seed))
+        return torch.randn(size, dtype=dtype, device=device, generator=generator)
+
+
+torchsde._brownian.brownian_interval._randn = torchsde_randn
+
+
 class KDiffusionSampler:
     def __init__(self, funcname, sd_model):
         denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
--
cgit v1.2.1

From 0fddb4a1c06a6e2122add7eee3b001a6d473baee Mon Sep 17 00:00:00 2001
From: brkirch
Date: Wed, 30 Nov 2022 08:02:39 -0500
Subject: Rework MPS randn fix, add randn_like fix

torch.manual_seed() already sets a CPU generator, so there is no reason to
create a CPU generator manually. torch.randn_like also needs a MPS fix for
k-diffusion, but a torch hijack with randn_like already exists so it can
also be used for that.
---
 modules/sd_samplers.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 8b11f569..4c123d3b 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -365,7 +365,10 @@ class TorchHijack:
             if noise.shape == x.shape:
                 return noise
 
-        return torch.randn_like(x)
+        if x.device.type == 'mps':
+            return torch.randn_like(x, device=devices.cpu).to(x.device)
+        else:
+            return torch.randn_like(x)
 
 
 # MPS fix for randn in torchsde
@@ -429,8 +432,7 @@ class KDiffusionSampler:
         self.model_wrap.step = 0
         self.eta = p.eta or opts.eta_ancestral
 
-        if self.sampler_noises is not None:
-            k_diffusion.sampling.torch = TorchHijack(self.sampler_noises)
+        k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
 
         extra_params_kwargs = {}
         for param_name in self.extra_params:
--
cgit v1.2.1

From 8b0703b8fcdab153958b11f0dd5e5b6b58565fed Mon Sep 17 00:00:00 2001
From: "Alex \"mcmonkey\" Goodwin"
Date: Fri, 16 Dec 2022 08:18:29 -0800
Subject: Add a workaround patch for DPM2 a issue

DPM2 a and DPM2 a Karras samplers are both affected by an issue described by
https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/3483 and can be
resolved by a workaround suggested by the k-diffusion author at
https://github.com/crowsonkb/k-diffusion/issues/43#issuecomment-1305195666
---
 modules/sd_samplers.py | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 4c123d3b..b8e0ce53 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -494,6 +494,9 @@ class KDiffusionSampler:
 
         x = x * sigmas[0]
 
+        if self.funcname == "sample_dpm_2_ancestral": # workaround dpm2 a issue
+            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
+
         extra_params_kwargs = self.initialize(p)
         if 'sigma_min' in inspect.signature(self.func).parameters:
             extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
--
cgit v1.2.1

From 180fdf7809ea18de2d3b04618846d5a4e33c002e Mon Sep 17 00:00:00 2001
From: "Alex \"mcmonkey\" Goodwin"
Date: Fri, 16 Dec 2022 08:42:00 -0800
Subject: apply to DPM2 (non-ancestral) as well

---
 modules/sd_samplers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index b8e0ce53..ae3d8bfa 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -494,7 +494,7 @@ class KDiffusionSampler:
 
         x = x * sigmas[0]
 
-        if self.funcname == "sample_dpm_2_ancestral": # workaround dpm2 a issue
+        if self.funcname in ['sample_dpm_2_ancestral', 'sample_dpm_2']:
             sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
 
         extra_params_kwargs = self.initialize(p)
--
cgit v1.2.1

From 7ba9bc2fdbfae8115294962510492faafeb48573 Mon Sep 17 00:00:00 2001
From: "Alex \"mcmonkey\" Goodwin"
Date: Sun, 18 Dec 2022 19:16:42 -0800
Subject: fix dpm2 in img2img as well

---
 modules/sd_samplers.py | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index ae3d8bfa..1a1b8919 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -454,6 +454,9 @@ class KDiffusionSampler:
         else:
             sigmas = self.model_wrap.get_sigmas(steps)
 
+        if self.funcname in ['sample_dpm_2_ancestral', 'sample_dpm_2']:
+            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
+
         sigma_sched = sigmas[steps - t_enc - 1:]
         xi = x + noise * sigma_sched[0]
--
cgit v1.2.1
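
[Editor's note] The three patches above apply one workaround in three places:
for DPM2 and DPM2 a, the next-to-last sigma of the noise schedule triggers the
artifact described in issue #3483, so it is dropped before sampling while the
trailing zero is kept. A minimal standalone sketch of the tensor operation
(the sigma values are invented for illustration, not taken from a real
schedule):

    import torch

    # a toy descending noise schedule, ending in 0 as k-diffusion schedules do
    sigmas = torch.tensor([14.6, 9.9, 6.1, 3.2, 1.1, 0.3, 0.0])

    # drop the next-to-last sigma (0.3) but keep the final 0.0 -- the same
    # torch.cat([sigmas[:-2], sigmas[-1:]]) used in the patches above
    sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])

    print(sigmas)  # tensor([14.6000,  9.9000,  6.1000,  3.2000,  1.1000,  0.0000])
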
From 399b229783a7b5fddab0a258740b4d59d668ee12 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 24 Dec 2022 09:03:45 +0300
Subject: eliminate duplicated code

add an option to samplers for skipping next to last sigma
---
 modules/sd_samplers.py | 31 ++++++++++++++-----------------
 1 file changed, 14 insertions(+), 17 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 1a1b8919..d26e48dc 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -23,16 +23,16 @@ samplers_k_diffusion = [
     ('Euler', 'sample_euler', ['k_euler'], {}),
     ('LMS', 'sample_lms', ['k_lms'], {}),
     ('Heun', 'sample_heun', ['k_heun'], {}),
-    ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}),
-    ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
+    ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
+    ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True}),
     ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
     ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
     ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}),
     ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
     ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
     ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
-    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
-    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
+    ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
+    ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
     ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
     ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
     ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}),
 ]
@@ -444,9 +444,7 @@ class KDiffusionSampler:
 
         return extra_params_kwargs
 
-    def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
-        steps, t_enc = setup_img2img_steps(p, steps)
-
+    def get_sigmas(self, p, steps):
         if p.sampler_noise_scheduler_override:
             sigmas = p.sampler_noise_scheduler_override(steps)
         elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
@@ -454,9 +452,16 @@ class KDiffusionSampler:
         else:
             sigmas = self.model_wrap.get_sigmas(steps)
 
-        if self.funcname in ['sample_dpm_2_ancestral', 'sample_dpm_2']:
+        if self.config is not None and self.config.options.get('discard_next_to_last_sigma', False):
             sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
 
+        return sigmas
+
+    def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+        steps, t_enc = setup_img2img_steps(p, steps)
+
+        sigmas = self.get_sigmas(p, steps)
+
         sigma_sched = sigmas[steps - t_enc - 1:]
         xi = x + noise * sigma_sched[0]
@@ -488,18 +493,10 @@ class KDiffusionSampler:
     def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning = None):
         steps = steps or p.steps
 
-        if p.sampler_noise_scheduler_override:
-            sigmas = p.sampler_noise_scheduler_override(steps)
-        elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
-            sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
-        else:
-            sigmas = self.model_wrap.get_sigmas(steps)
+        sigmas = self.get_sigmas(p, steps)
 
         x = x * sigmas[0]
 
-        if self.funcname in ['sample_dpm_2_ancestral', 'sample_dpm_2']:
-            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
-
         extra_params_kwargs = self.initialize(p)
         if 'sigma_min' in inspect.signature(self.func).parameters:
             extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
--
cgit v1.2.1

From 11dd79e346bd780bc5c3119df962e7a9c20f2493 Mon Sep 17 00:00:00 2001
From: AbstractQbit <38468635+AbstractQbit@users.noreply.github.com>
Date: Sat, 24 Dec 2022 14:00:17 +0300
Subject: Add an option for faster low quality previews

---
 modules/sd_samplers.py | 23 ++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index d26e48dc..fbb56af4 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -106,20 +106,29 @@ def setup_img2img_steps(p, steps=None):
     return steps, t_enc
 
 
-def single_sample_to_image(sample):
-    x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
+def single_sample_to_image(sample, approximation=False):
+    if approximation:
+        # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/2
+        coefs = torch.tensor(
+            [[ 0.298,  0.207,  0.208],
+             [ 0.187,  0.286,  0.173],
+             [-0.158,  0.189,  0.264],
+             [-0.184, -0.271, -0.473]]).to(sample.device)
+        x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs)
+    else:
+        x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
     x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
     x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
     x_sample = x_sample.astype(np.uint8)
     return Image.fromarray(x_sample)
 
-def sample_to_image(samples, index=0):
-    return single_sample_to_image(samples[index])
+def sample_to_image(samples, index=0, approximation=False):
+    return single_sample_to_image(samples[index], approximation)
 
-def samples_to_image_grid(samples):
-    return images.image_grid([single_sample_to_image(sample) for sample in samples])
+def samples_to_image_grid(samples, approximation=False):
+    return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
 
 def store_latent(decoded):
@@ -127,7 +136,7 @@ def store_latent(decoded):
 
     if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
         if not shared.parallel_processing_allowed:
-            shared.state.current_image = sample_to_image(decoded)
+            shared.state.current_image = sample_to_image(decoded, approximation=opts.show_progress_approximate)
 
 class InterruptedException(BaseException):
--
cgit v1.2.1

From 0b8acce6a9a1418fa88a506450cd1b92e2d48986 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 24 Dec 2022 18:38:16 +0300
Subject: separate part of denoiser code into a function to make it easier for extensions to override it

---
 modules/sd_samplers.py | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index d26e48dc..8efe74df 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -288,6 +288,16 @@ class CFGDenoiser(torch.nn.Module):
         self.init_latent = None
         self.step = 0
 
+    def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
+        denoised_uncond = x_out[-uncond.shape[0]:]
+        denoised = torch.clone(denoised_uncond)
+
+        for i, conds in enumerate(conds_list):
+            for cond_index, weight in conds:
+                denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
+
+        return denoised
+
     def forward(self, x, sigma, uncond, cond, cond_scale, image_cond):
         if state.interrupted or state.skipped:
             raise InterruptedException
@@ -329,12 +339,7 @@ class CFGDenoiser(torch.nn.Module):
 
         x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]})
 
-        denoised_uncond = x_out[-uncond.shape[0]:]
-        denoised = torch.clone(denoised_uncond)
-
-        for i, conds in enumerate(conds_list):
-            for cond_index, weight in conds:
-                denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
+        denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
 
         if self.mask is not None:
             denoised = self.init_latent * self.mask + self.nmask * denoised
--
cgit v1.2.1

From 56e557c6ff8a6480887c9c585bf908045ee8e791 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 24 Dec 2022 22:39:00 +0300
Subject: added cheap NN approximation for VAE

---
 modules/sd_samplers.py | 29 ++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 27ef4ff8..177b5338 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -9,7 +9,7 @@ import k_diffusion.sampling
 import torchsde._brownian.brownian_interval
 import ldm.models.diffusion.ddim
 import ldm.models.diffusion.plms
-from modules import prompt_parser, devices, processing, images
+from modules import prompt_parser, devices, processing, images, sd_vae_approx
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
 
@@ -106,28 +106,31 @@ def setup_img2img_steps(p, steps=None):
     return steps, t_enc
 
 
-def single_sample_to_image(sample, approximation=False):
-    if approximation:
-        # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/2
-        coefs = torch.tensor(
-            [[ 0.298,  0.207,  0.208],
-             [ 0.187,  0.286,  0.173],
-             [-0.158,  0.189,  0.264],
-             [-0.184, -0.271, -0.473]]).to(sample.device)
-        x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs)
+approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2}
+
+
+def single_sample_to_image(sample, approximation=None):
+    if approximation is None:
+        approximation = approximation_indexes.get(opts.show_progress_type, 0)
+
+    if approximation == 2:
+        x_sample = sd_vae_approx.cheap_approximation(sample)
+    elif approximation == 1:
+        x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
     else:
         x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
+
     x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
     x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
     x_sample = x_sample.astype(np.uint8)
     return Image.fromarray(x_sample)
 
-def sample_to_image(samples, index=0, approximation=False):
+def sample_to_image(samples, index=0, approximation=None):
     return single_sample_to_image(samples[index], approximation)
 
-def samples_to_image_grid(samples, approximation=False):
+def samples_to_image_grid(samples, approximation=None):
     return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
 
@@ -136,7 +139,7 @@ def store_latent(decoded):
 
     if opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
         if not shared.parallel_processing_allowed:
-            shared.state.current_image = sample_to_image(decoded, approximation=opts.show_progress_approximate)
+            shared.state.current_image = sample_to_image(decoded)
--
cgit v1.2.1

From 16b9661d2741b241c3964fcbd56559c078b84822 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 1 Jan 2023 09:51:37 +0300
Subject: change karras scheduler sigmas to values recommended by SD from old 0.1 to 10 with an option to revert to old

---
 modules/sd_samplers.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'modules/sd_samplers.py')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 177b5338..e904d860 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -465,7 +465,9 @@ class KDiffusionSampler:
         if p.sampler_noise_scheduler_override:
             sigmas = p.sampler_noise_scheduler_override(steps)
         elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
-            sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
+            sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
+
+            sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device)
         else:
             sigmas = self.model_wrap.get_sigmas(steps)
--
cgit v1.2.1
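
[Editor's note] The final patch stops hardcoding the Karras schedule to
sigma_min=0.1, sigma_max=10 and instead derives the range from the model's own
sigmas, keeping opts.use_old_karras_scheduler_sigmas as an opt-out. A
standalone sketch of the effect, assuming only the k-diffusion package is
installed; the 0.0292/14.6146 endpoints are typical of a Stable Diffusion 1.x
denoiser wrapper but are illustrative here, not read from a real model:

    import k_diffusion.sampling as K

    steps = 20

    # old behaviour: clipped, hardcoded range
    old_sigmas = K.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device='cpu')

    # new behaviour: the full range the model was trained on
    new_sigmas = K.get_sigmas_karras(n=steps, sigma_min=0.0292, sigma_max=14.6146, device='cpu')

    # both return steps + 1 values ending in 0; the new schedule starts higher
    # and ends lower, so less of the noise range is cut off
    print(old_sigmas[0].item(), old_sigmas[-2].item())   # ~10.0, ~0.1
    print(new_sigmas[0].item(), new_sigmas[-2].item())   # ~14.6, ~0.03
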