path: root/modules/sd_samplers_kdiffusion.py
author    AUTOMATIC1111 <16777216c@gmail.com>    2023-08-08 19:20:11 +0300
committer AUTOMATIC1111 <16777216c@gmail.com>    2023-08-08 21:04:44 +0300
commit    8285a149d8c488ae6c7a566eb85fb5e825145464 (patch)
tree      89f204c4dc44f58ac2f15719f9bdbf28c4590cb5 /modules/sd_samplers_kdiffusion.py
parent    2d8e4a654480ea080fec62834331a3c632ed0330 (diff)
add CFG denoiser implementation for DDIM, PLMS and UniPC (as of this commit, both the old and new implementations can be run and compared)
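For context, CFGDenoiser is the wrapper that applies classifier-free guidance: at every step the model denoises the latent both with and without the prompt conditioning, then extrapolates from the unconditional result toward the conditional one. A minimal sketch of that combination step (function and tensor names here are illustrative, not the module's actual API):

```python
import torch

def cfg_combine(denoised_cond: torch.Tensor, denoised_uncond: torch.Tensor, cfg_scale: float) -> torch.Tensor:
    # Classifier-free guidance: move the prediction away from the unconditional
    # output, in the direction of the prompt-conditioned output.
    # cfg_scale == 1.0 returns the conditional prediction unchanged.
    return denoised_uncond + cfg_scale * (denoised_cond - denoised_uncond)
```

Routing DDIM, PLMS and UniPC through this same denoiser means features implemented inside it, such as the masked-inpainting compositing and image CFG scale visible in the removed initialize() below, behave uniformly across samplers.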
Diffstat (limited to 'modules/sd_samplers_kdiffusion.py')
-rw-r--r--  modules/sd_samplers_kdiffusion.py  152
1 file changed, 14 insertions(+), 138 deletions(-)
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 9c9b46d1..3a2e01b7 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -4,8 +4,7 @@ import inspect
 import k_diffusion.sampling
 from modules import devices, sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser
-from modules.processing import StableDiffusionProcessing
-from modules.shared import opts, state
+from modules.shared import opts
 import modules.shared as shared

 samplers_k_diffusion = [
@@ -54,133 +53,17 @@ k_diffusion_scheduler = {
 }

-class TorchHijack:
-    def __init__(self, sampler_noises):
-        # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
-        # implementation.
-        self.sampler_noises = deque(sampler_noises)
-
-    def __getattr__(self, item):
-        if item == 'randn_like':
-            return self.randn_like
-
-        if hasattr(torch, item):
-            return getattr(torch, item)
-
-        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
-
-    def randn_like(self, x):
-        if self.sampler_noises:
-            noise = self.sampler_noises.popleft()
-            if noise.shape == x.shape:
-                return noise
+class KDiffusionSampler(sd_samplers_common.Sampler):
+    def __init__(self, funcname, sd_model):
-        return devices.randn_like(x)
+        super().__init__(funcname)
+        self.extra_params = sampler_extra_params.get(funcname, [])
+        self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname)
-class KDiffusionSampler:
-    def __init__(self, funcname, sd_model):
         denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
-
         self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
-        self.funcname = funcname
-        self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname)
-        self.extra_params = sampler_extra_params.get(funcname, [])
-        self.model_wrap_cfg = sd_samplers_cfg_denoiser.CFGDenoiser(self.model_wrap)
-        self.sampler_noises = None
-        self.stop_at = None
-        self.eta = None
-        self.config = None  # set by the function calling the constructor
-        self.last_latent = None
-        self.s_min_uncond = None
-
-        # NOTE: These are also defined in the StableDiffusionProcessing class.
-        # They should have been here to begin with but we're going to
-        # leave that class __init__ signature alone.
-        self.s_churn = 0.0
-        self.s_tmin = 0.0
-        self.s_tmax = float('inf')
-        self.s_noise = 1.0
-
-        self.conditioning_key = sd_model.model.conditioning_key
-
-    def callback_state(self, d):
-        step = d['i']
-        latent = d["denoised"]
-        if opts.live_preview_content == "Combined":
-            sd_samplers_common.store_latent(latent)
-        self.last_latent = latent
-
-        if self.stop_at is not None and step > self.stop_at:
-            raise sd_samplers_common.InterruptedException
-
-        state.sampling_step = step
-        shared.total_tqdm.update()
-
-    def launch_sampling(self, steps, func):
-        state.sampling_steps = steps
-        state.sampling_step = 0
-
-        try:
-            return func()
-        except RecursionError:
-            print(
-                'Encountered RecursionError during sampling, returning last latent. '
-                'rho >5 with a polyexponential scheduler may cause this error. '
-                'You should try to use a smaller rho value instead.'
-            )
-            return self.last_latent
-        except sd_samplers_common.InterruptedException:
-            return self.last_latent
-
-    def number_of_needed_noises(self, p):
-        return p.steps
-
-    def initialize(self, p: StableDiffusionProcessing):
-        self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
-        self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
-        self.model_wrap_cfg.step = 0
-        self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
-        self.eta = p.eta if p.eta is not None else opts.eta_ancestral
-        self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)
-
-        k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
-
-        extra_params_kwargs = {}
-        for param_name in self.extra_params:
-            if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
-                extra_params_kwargs[param_name] = getattr(p, param_name)
-
-        if 'eta' in inspect.signature(self.func).parameters:
-            if self.eta != 1.0:
-                p.extra_generation_params["Eta"] = self.eta
-
-            extra_params_kwargs['eta'] = self.eta
-
-        if len(self.extra_params) > 0:
-            s_churn = getattr(opts, 's_churn', p.s_churn)
-            s_tmin = getattr(opts, 's_tmin', p.s_tmin)
-            s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax  # 0 = inf
-            s_noise = getattr(opts, 's_noise', p.s_noise)
-
-            if s_churn != self.s_churn:
-                extra_params_kwargs['s_churn'] = s_churn
-                p.s_churn = s_churn
-                p.extra_generation_params['Sigma churn'] = s_churn
-            if s_tmin != self.s_tmin:
-                extra_params_kwargs['s_tmin'] = s_tmin
-                p.s_tmin = s_tmin
-                p.extra_generation_params['Sigma tmin'] = s_tmin
-            if s_tmax != self.s_tmax:
-                extra_params_kwargs['s_tmax'] = s_tmax
-                p.s_tmax = s_tmax
-                p.extra_generation_params['Sigma tmax'] = s_tmax
-            if s_noise != self.s_noise:
-                extra_params_kwargs['s_noise'] = s_noise
-                p.s_noise = s_noise
-                p.extra_generation_params['Sigma noise'] = s_noise
-
-        return extra_params_kwargs
+        self.model_wrap_cfg = sd_samplers_cfg_denoiser.CFGDenoiser(self.model_wrap, self)

     def get_sigmas(self, p, steps):
         discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
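Everything this hunk deletes still exists after the commit; it is lifted into the shared sd_samplers_common.Sampler base class that KDiffusionSampler now inherits (note the new `super().__init__(funcname)` and the second argument now passed to CFGDenoiser). The most opaque of the removed pieces is TorchHijack: initialize() rebinds the module-level `torch` name inside `k_diffusion.sampling` so that the `torch.randn_like` calls made by ancestral samplers draw from the webui's pre-generated, seed-controlled noises rather than fresh randoms. A condensed sketch of the pattern (class and variable names are illustrative):

```python
from collections import deque
import torch

class NoiseFeeder:
    """Duck-types as the torch module, but randn_like() drains a queue of
    pre-generated noise tensors before falling back to real randomness."""

    def __init__(self, prepared_noises):
        self.prepared_noises = deque(prepared_noises)

    def __getattr__(self, item):
        # Anything not overridden here falls through to the real torch module.
        return getattr(torch, item)

    def randn_like(self, x):
        if self.prepared_noises and self.prepared_noises[0].shape == x.shape:
            return self.prepared_noises.popleft()
        return torch.randn_like(x)

# Hypothetical usage, mirroring the removed initialize():
#   k_diffusion.sampling.torch = NoiseFeeder(sampler_noises or [])
```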
@@ -232,22 +115,12 @@ class KDiffusionSampler:
         return sigmas

-    def create_noise_sampler(self, x, sigmas, p):
-        """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
-        if shared.opts.no_dpmpp_sde_batch_determinism:
-            return None
-
-        from k_diffusion.sampling import BrownianTreeNoiseSampler
-        sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
-        current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]
-        return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds)
-
     def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
         steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)

         sigmas = self.get_sigmas(p, steps)
-
         sigma_sched = sigmas[steps - t_enc - 1:]
+
         xi = x + noise * sigma_sched[0]

         extra_params_kwargs = self.initialize(p)
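The deleted create_noise_sampler() (presumably moved to the base class as well, since the `brownian_noise` branch in the next hunk still needs it) is what keeps DPM++ SDE reproducible across batch sizes: it seeds k-diffusion's BrownianTreeNoiseSampler with one seed per image in the current batch, so each image gets its own Brownian tree and receives the same noise whether it is generated alone or alongside others. A sketch of the idea under those assumptions (names are illustrative):

```python
from k_diffusion.sampling import BrownianTreeNoiseSampler

def batch_deterministic_noise_sampler(x, sigmas, per_image_seeds):
    # One seed per batch element: BrownianTreeNoiseSampler builds an
    # independent tree for each, so image N draws the same noise sequence
    # regardless of how many other images share the batch.
    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
    return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=per_image_seeds)

# The result is then invoked once per step as noise_sampler(sigma, sigma_next).
```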
@@ -296,12 +169,14 @@ class KDiffusionSampler:
         extra_params_kwargs = self.initialize(p)
         parameters = inspect.signature(self.func).parameters

+        if 'n' in parameters:
+            extra_params_kwargs['n'] = steps
+
         if 'sigma_min' in parameters:
             extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
             extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
-            if 'n' in parameters:
-                extra_params_kwargs['n'] = steps
-        else:
+
+        if 'sigmas' in parameters:
             extra_params_kwargs['sigmas'] = sigmas

         if self.config.options.get('brownian_noise', False):
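The reshuffle above changes how keyword arguments are routed to the underlying k-diffusion function: `n`, `sigma_min`/`sigma_max`, and `sigmas` are now each probed independently in the function's signature, instead of `sigmas` being the `else` branch of the `sigma_min` check. The mechanism is ordinary `inspect.signature` filtering; a toy illustration:

```python
import inspect

def filter_kwargs(func, candidates):
    """Keep only the candidate kwargs that func actually declares."""
    params = inspect.signature(func).parameters
    return {name: value for name, value in candidates.items() if name in params}

def fake_sampler(x, sigmas):  # hypothetical sampler that wants a sigma schedule
    return x

print(filter_kwargs(fake_sampler, {'n': 20, 'sigmas': [14.6, 7.0, 0.0]}))
# -> {'sigmas': [14.6, 7.0, 0.0]}
```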
@@ -322,3 +197,4 @@ class KDiffusionSampler:
         return samples
+