-rw-r--r--  launch.py              | 11
-rw-r--r--  modules/extras.py      | 47
-rw-r--r--  modules/img2img.py     |  2
-rw-r--r--  modules/processing.py  | 22
-rw-r--r--  modules/sd_models.py   |  8
-rw-r--r--  modules/sd_samplers.py | 96
-rw-r--r--  modules/shared.py      |  7
-rw-r--r--  modules/ui.py          | 28
-rw-r--r--  scripts/xy_grid.py     | 12
9 files changed, 135 insertions(+), 98 deletions(-)
@@ -15,14 +15,14 @@ torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
-k_diffusion_package = os.environ.get('K_DIFFUSION_PACKAGE', "git+https://github.com/crowsonkb/k-diffusion.git@1a0703dfb7d24d8806267c3e7ccc4caf67fd1331")
+k_diffusion_package = os.environ.get('K_DIFFUSION_PACKAGE', "git+https://github.com/crowsonkb/k-diffusion.git@9e3002b7cd64df7870e08527b7664eb2f2f5f3f5")
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
-ldsr_commit_hash = os.environ.get('LDSR_COMMIT_HASH',"abf33e7002d59d9085081bce93ec798dcabd49af")
+ldsr_commit_hash = os.environ.get('LDSR_COMMIT_HASH', "abf33e7002d59d9085081bce93ec798dcabd49af")
args = shlex.split(commandline_args)
@@ -113,6 +113,13 @@ if not skip_torch_cuda_test:
if not is_installed("k_diffusion.sampling"):
run_pip(f"install {k_diffusion_package}", "k-diffusion")
+if not check_run_python("import k_diffusion; import inspect; assert 'eta' in inspect.signature(k_diffusion.sampling.sample_euler_ancestral).parameters"):
+ print(f"k-diffusion does not have 'eta' parameter; reinstalling latest version")
+ try:
+ run_pip(f"install --upgrade --force-reinstall {k_diffusion_package}", "k-diffusion")
+ except RuntimeError as e:
+ print(str(e))
+
if not is_installed("gfpgan"):
run_pip(f"install {gfpgan_package}", "gfpgan")
diff --git a/modules/extras.py b/modules/extras.py
index c4ee2b62..15de033a 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -140,7 +140,7 @@ def run_pnginfo(image):
return '', geninfo, info
-def run_modelmerger(modelname_0, modelname_1, interp_method, interp_amount):
+def run_modelmerger(primary_model_name, secondary_model_name, interp_method, interp_amount):
# Linear interpolation (https://en.wikipedia.org/wiki/Linear_interpolation)
def weighted_sum(theta0, theta1, alpha):
return ((1 - alpha) * theta0) + (alpha * theta1)
@@ -150,45 +150,52 @@ def run_modelmerger(modelname_0, modelname_1, interp_method, interp_amount):
alpha = alpha * alpha * (3 - (2 * alpha))
return theta0 + ((theta1 - theta0) * alpha)
- if os.path.exists(modelname_0):
- model0_filename = modelname_0
- modelname_0 = os.path.splitext(os.path.basename(modelname_0))[0]
+ # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
+ def inv_sigmoid(theta0, theta1, alpha):
+ import math
+ alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
+ return theta0 + ((theta1 - theta0) * alpha)
+
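Taken together with the 1.0 - interp_amount reversal applied further down in this hunk, the three blend curves behave as follows; a self-contained sketch using toy scalars in place of state-dict tensors (note the comments above: "Sigmoid" and "Inverse Sigmoid" are really smoothstep and inverse smoothstep):

```python
import math

def weighted_sum(theta0, theta1, alpha):
    return ((1 - alpha) * theta0) + (alpha * theta1)

def sigmoid(theta0, theta1, alpha):
    # Smoothstep: eases the blend in and out near the endpoints.
    alpha = alpha * alpha * (3 - (2 * alpha))
    return theta0 + ((theta1 - theta0) * alpha)

def inv_sigmoid(theta0, theta1, alpha):
    # Inverse smoothstep: steepens the blend around the midpoint.
    alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
    return theta0 + ((theta1 - theta0) * alpha)

interp_amount = 0.3
alpha = 1.0 - interp_amount  # the reversal run_modelmerger applies per key
# With theta0=1.0 and theta1=0.0 the result is the primary model's share:
for fn in (weighted_sum, sigmoid, inv_sigmoid):
    print(fn.__name__, round(fn(1.0, 0.0, alpha), 4))
# weighted_sum -> 0.3: interp_amount is exactly the primary model's weight
```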
+ if os.path.exists(primary_model_name):
+ primary_model_filename = primary_model_name
+ primary_model_name = os.path.splitext(os.path.basename(primary_model_name))[0]
else:
- model0_filename = 'models/' + modelname_0 + '.ckpt'
+ primary_model_filename = 'models/' + primary_model_name + '.ckpt'
- if os.path.exists(modelname_1):
- model1_filename = modelname_1
- modelname_1 = os.path.splitext(os.path.basename(modelname_1))[0]
+ if os.path.exists(secondary_model_name):
+ secondary_model_filename = secondary_model_name
+ secondary_model_name = os.path.splitext(os.path.basename(secondary_model_name))[0]
else:
- model1_filename = 'models/' + modelname_1 + '.ckpt'
+ secondary_model_filename = 'models/' + secondary_model_name + '.ckpt'
- print(f"Loading {model0_filename}...")
- model_0 = torch.load(model0_filename, map_location='cpu')
+ print(f"Loading {primary_model_filename}...")
+ primary_model = torch.load(primary_model_filename, map_location='cpu')
- print(f"Loading {model1_filename}...")
- model_1 = torch.load(model1_filename, map_location='cpu')
-
- theta_0 = model_0['state_dict']
- theta_1 = model_1['state_dict']
+ print(f"Loading {secondary_model_filename}...")
+ secondary_model = torch.load(secondary_model_filename, map_location='cpu')
+
+ theta_0 = primary_model['state_dict']
+ theta_1 = secondary_model['state_dict']
theta_funcs = {
"Weighted Sum": weighted_sum,
"Sigmoid": sigmoid,
+ "Inverse Sigmoid": inv_sigmoid
}
theta_func = theta_funcs[interp_method]
print(f"Merging...")
for key in tqdm.tqdm(theta_0.keys()):
if 'model' in key and key in theta_1:
- theta_0[key] = theta_func(theta_0[key], theta_1[key], interp_amount)
+ theta_0[key] = theta_func(theta_0[key], theta_1[key], (float(1.0) - interp_amount)) # Need to reverse the interp_amount to match the desired mix ratio in the merged checkpoint
for key in theta_1.keys():
if 'model' in key and key not in theta_0:
theta_0[key] = theta_1[key]
- output_modelname = 'models/' + modelname_0 + '-' + modelname_1 + '-' + interp_method.replace(" ", "_") + '-' + str(interp_amount) + '-merged.ckpt'
+ output_modelname = 'models/' + primary_model_name + '_' + str(round(interp_amount,2)) + '-' + secondary_model_name + '_' + str(round((float(1.0) - interp_amount),2)) + '-' + interp_method.replace(" ", "_") + '-merged.ckpt'
print(f"Saving to {output_modelname}...")
- torch.save(model_0, output_modelname)
+ torch.save(primary_model, output_modelname)
print(f"Checkpoint saved.")
- return "Checkpoint saved to " + output_modelname
+ return "Checkpoint saved to " + output_modelname
\ No newline at end of file
diff --git a/modules/img2img.py b/modules/img2img.py
index d80b3e75..03e934e9 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -124,4 +124,4 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
if opts.samples_log_stdout:
print(generation_info_js)
- return processed.images, generation_info_js, plaintext_to_html(processed.info)
\ No newline at end of file
+ return processed.images, generation_info_js, plaintext_to_html(processed.info)
diff --git a/modules/processing.py b/modules/processing.py
index 8d043f4d..4ecdfcd2 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -49,7 +49,7 @@ def apply_color_correction(correction, image):
class StableDiffusionProcessing:
- def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", styles=None, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, seed_enable_extras=True, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None):
+ def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", styles=None, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, seed_enable_extras=True, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None, eta=None):
self.sd_model = sd_model
self.outpath_samples: str = outpath_samples
self.outpath_grids: str = outpath_grids
@@ -75,15 +75,15 @@ class StableDiffusionProcessing:
self.do_not_save_grid: bool = do_not_save_grid
self.extra_generation_params: dict = extra_generation_params or {}
self.overlay_images = overlay_images
+ self.eta = eta
self.paste_to = None
self.color_corrections = None
self.denoising_strength: float = 0
-
- self.ddim_eta = opts.ddim_eta
+
self.ddim_discretize = opts.ddim_discretize
self.s_churn = opts.s_churn
self.s_tmin = opts.s_tmin
- self.s_tmax = float('inf') # not representable as a standard ui option
+ self.s_tmax = float('inf') # not representable as a standard ui option
self.s_noise = opts.s_noise
if not seed_enable_extras:
@@ -100,7 +100,7 @@ class StableDiffusionProcessing:
class Processed:
- def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0):
+ def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None):
self.images = images_list
self.prompt = p.prompt
self.negative_prompt = p.negative_prompt
@@ -124,7 +124,7 @@ class Processed:
self.extra_generation_params = p.extra_generation_params
self.index_of_first_image = index_of_first_image
- self.ddim_eta = p.ddim_eta
+ self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
self.s_churn = p.s_churn
self.s_tmin = p.s_tmin
@@ -139,6 +139,7 @@ class Processed:
self.all_prompts = all_prompts or [self.prompt]
self.all_seeds = all_seeds or [self.seed]
self.all_subseeds = all_subseeds or [self.subseed]
+ self.infotexts = infotexts or [info]
def js(self):
obj = {
@@ -165,6 +166,7 @@ class Processed:
"denoising_strength": self.denoising_strength,
"extra_generation_params": self.extra_generation_params,
"index_of_first_image": self.index_of_first_image,
+ "infotexts": self.infotexts,
}
return json.dumps(obj)
@@ -269,6 +271,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
+ "Eta": (None if p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
}
generation_params.update(p.extra_generation_params)
@@ -277,7 +280,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
negative_prompt_text = "\nNegative prompt: " + p.negative_prompt if p.negative_prompt else ""
- return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip() + "".join(["\n\n" + x for x in comments])
+ return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip()
def process_images(p: StableDiffusionProcessing) -> Processed:
@@ -322,6 +325,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if os.path.exists(cmd_opts.embeddings_dir):
model_hijack.load_textual_inversion_embeddings(cmd_opts.embeddings_dir, p.sd_model)
+ infotexts = []
output_images = []
precision_scope = torch.autocast if cmd_opts.precision == "autocast" else contextlib.nullcontext
ema_scope = (contextlib.nullcontext if cmd_opts.lowvram else p.sd_model.ema_scope)
@@ -404,6 +408,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if opts.samples_save and not p.do_not_save_samples:
images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
+ infotexts.append(infotext(n, i))
output_images.append(image)
state.nextjob()
@@ -416,6 +421,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
grid = images.image_grid(output_images, p.batch_size)
if opts.return_grid:
+ infotexts.insert(0, infotext())
output_images.insert(0, grid)
index_of_first_image = 1
@@ -423,7 +429,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
images.save_image(grid, p.outpath_grids, "grid", all_seeds[0], all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)
devices.torch_gc()
- return Processed(p, output_images, all_seeds[0], infotext(), subseed=all_subseeds[0], all_prompts=all_prompts, all_seeds=all_seeds, all_subseeds=all_subseeds, index_of_first_image=index_of_first_image)
+ return Processed(p, output_images, all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=all_subseeds[0], all_prompts=all_prompts, all_seeds=all_seeds, all_subseeds=all_subseeds, index_of_first_image=index_of_first_image, infotexts=infotexts)
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
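Worth spelling out: the new infotexts list stays index-aligned with output_images, so when the grid is prepended both lists gain a row 0 and index_of_first_image moves to 1. A toy sketch of that contract (illustrative strings, not the real Processed fields):

```python
# Per-image bookkeeping as process_images() maintains it.
output_images = ["image_0", "image_1"]
infotexts = ["infotext for image_0", "infotext for image_1"]
index_of_first_image = 0

return_grid = True  # stands in for opts.return_grid
if return_grid:
    # Grid goes first in both lists, keeping infotexts[i] matched
    # to output_images[i]; real images start at index 1.
    infotexts.insert(0, "infotext for grid")
    output_images.insert(0, "grid")
    index_of_first_image = 1

for i, image in enumerate(output_images):
    print(i, image, "->", infotexts[i])
```

This alignment is what lets the updated save_files in modules/ui.py below pick data["infotexts"][index] for a single selected image.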
diff --git a/modules/sd_models.py b/modules/sd_models.py
index dc81b0dc..9decc911 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -10,7 +10,7 @@ from ldm.util import instantiate_from_config
from modules import shared
-CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash'])
+CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name'])
checkpoints_list = {}
try:
@@ -45,7 +45,8 @@ def list_models():
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title = modeltitle(cmd_ckpt, h)
- checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h)
+ model_name = title.rsplit(".",1)[0] # remove extension if present
+ checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, model_name)
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found: {cmd_ckpt}", file=sys.stderr)
@@ -53,7 +54,8 @@ def list_models():
for filename in glob.glob(model_dir + '/**/*.ckpt', recursive=True):
h = model_hash(filename)
title = modeltitle(filename, h)
- checkpoints_list[title] = CheckpointInfo(filename, title, h)
+ model_name = title.rsplit(".",1)[0] # remove extension if present
+ checkpoints_list[title] = CheckpointInfo(filename, title, h, model_name)
def model_hash(filename):
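A quick illustration of the rsplit(".", 1)[0] used to derive model_name (the sample titles are hypothetical):

```python
# Only the last dot-suffix is stripped; titles without a dot pass through.
for title in ("sd-v1-4.ckpt", "custom.mix.ckpt", "no-extension"):
    print(title, "->", title.rsplit(".", 1)[0])
# sd-v1-4.ckpt    -> sd-v1-4
# custom.mix.ckpt -> custom.mix
# no-extension    -> no-extension
```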
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 666ee1ee..3588aae6 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -3,6 +3,7 @@ import numpy as np
import torch
import tqdm
from PIL import Image
+import inspect
import k_diffusion.sampling
import ldm.models.diffusion.ddim
@@ -38,9 +39,9 @@ samplers = [
samplers_for_img2img = [x for x in samplers if x.name != 'PLMS']
sampler_extra_params = {
- 'sample_euler':['s_churn','s_tmin','s_tmax','s_noise'],
- 'sample_heun' :['s_churn','s_tmin','s_tmax','s_noise'],
- 'sample_dpm_2':['s_churn','s_tmin','s_tmax','s_noise'],
+ 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
+ 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
+ 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
}
def setup_img2img_steps(p, steps=None):
@@ -98,6 +99,8 @@ class VanillaStableDiffusionSampler:
self.init_latent = None
self.sampler_noises = None
self.step = 0
+ self.eta = None
+ self.default_eta = 0.0
def number_of_needed_noises(self, p):
return 0
@@ -120,20 +123,29 @@ class VanillaStableDiffusionSampler:
self.step += 1
return res
+ def initialize(self, p):
+ self.eta = p.eta or opts.eta_ddim
+
+ for fieldname in ['p_sample_ddim', 'p_sample_plms']:
+ if hasattr(self.sampler, fieldname):
+ setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
+
+ self.mask = p.mask if hasattr(p, 'mask') else None
+ self.nmask = p.nmask if hasattr(p, 'nmask') else None
+
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None):
steps, t_enc = setup_img2img_steps(p, steps)
# existing code fails with certain step counts, like 9
try:
- self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=p.ddim_eta, ddim_discretize=p.ddim_discretize, verbose=False)
+ self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
except Exception:
- self.sampler.make_schedule(ddim_num_steps=steps+1,ddim_eta=p.ddim_eta, ddim_discretize=p.ddim_discretize, verbose=False)
+ self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
- self.sampler.p_sample_ddim = self.p_sample_ddim_hook
- self.mask = p.mask if hasattr(p, 'mask') else None
- self.nmask = p.nmask if hasattr(p, 'nmask') else None
+ self.initialize(p)
+
self.init_latent = x
self.step = 0
@@ -142,11 +154,8 @@ class VanillaStableDiffusionSampler:
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None):
- for fieldname in ['p_sample_ddim', 'p_sample_plms']:
- if hasattr(self.sampler, fieldname):
- setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
- self.mask = None
- self.nmask = None
+ self.initialize(p)
+
self.init_latent = None
self.step = 0
@@ -154,9 +163,9 @@ class VanillaStableDiffusionSampler:
# existing code fails with certain step counts, like 9
try:
- samples_ddim, _ = self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=p.ddim_eta)
+ samples_ddim, _ = self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)
except Exception:
- samples_ddim, _ = self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=p.ddim_eta)
+ samples_ddim, _ = self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)
return samples_ddim
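One subtlety in the new initialize: `p.eta or opts.eta_ddim` is a truthiness fallback, so an explicit per-job eta of 0.0 also defers to the global option (harmless while the eta_ddim default is 0.0, but worth knowing). A minimal sketch of the behavior:

```python
def resolve_eta(p_eta, option_value):
    # Mirrors `self.eta = p.eta or opts.eta_ddim`: None falls back to the
    # option, but so does an explicit 0.0, since both are falsy.
    return p_eta or option_value

print(resolve_eta(None, 0.0))  # 0.0  (no override: option wins)
print(resolve_eta(0.5, 0.0))   # 0.5  (per-job override wins)
print(resolve_eta(0.0, 0.3))   # 0.3  (explicit 0.0 is treated as unset)
```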
@@ -229,11 +238,13 @@ class KDiffusionSampler:
self.model_wrap = k_diffusion.external.CompVisDenoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
self.func = getattr(k_diffusion.sampling, self.funcname)
- self.extra_params = sampler_extra_params.get(funcname,[])
+ self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
self.sampler_noise_index = 0
self.stop_at = None
+ self.eta = None
+ self.default_eta = 1.0
def callback_state(self, d):
store_latent(d["denoised"])
@@ -252,22 +263,12 @@ class KDiffusionSampler:
self.sampler_noise_index += 1
return res
- def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None):
- steps, t_enc = setup_img2img_steps(p, steps)
-
- sigmas = self.model_wrap.get_sigmas(steps)
-
- noise = noise * sigmas[steps - t_enc - 1]
-
- xi = x + noise
-
- sigma_sched = sigmas[steps - t_enc - 1:]
-
+ def initialize(self, p):
self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
- self.model_wrap_cfg.init_latent = x
self.model_wrap.step = 0
self.sampler_noise_index = 0
+ self.eta = p.eta or opts.eta_ancestral
if hasattr(k_diffusion.sampling, 'trange'):
k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(self, *args, **kwargs)
@@ -276,9 +277,28 @@ class KDiffusionSampler:
k_diffusion.sampling.torch = TorchHijack(self)
extra_params_kwargs = {}
- for val in self.extra_params:
- if hasattr(p,val):
- extra_params_kwargs[val] = getattr(p,val)
+ for param_name in self.extra_params:
+ if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
+ extra_params_kwargs[param_name] = getattr(p, param_name)
+
+ if 'eta' in inspect.signature(self.func).parameters:
+ extra_params_kwargs['eta'] = self.eta
+
+ return extra_params_kwargs
+
+ def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None):
+ steps, t_enc = setup_img2img_steps(p, steps)
+
+ sigmas = self.model_wrap.get_sigmas(steps)
+
+ noise = noise * sigmas[steps - t_enc - 1]
+ xi = x + noise
+
+ extra_params_kwargs = self.initialize(p)
+
+ sigma_sched = sigmas[steps - t_enc - 1:]
+
+ self.model_wrap_cfg.init_latent = x
return self.func(self.model_wrap_cfg, xi, sigma_sched, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)
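Worked numbers for the noising arithmetic in sample_img2img above (a sketch: the stand-in schedule and the t_enc rounding only approximate what setup_img2img_steps and get_sigmas produce, they are not the real values):

```python
steps = 20
denoising_strength = 0.75
# Roughly what setup_img2img_steps yields: how many steps actually run.
t_enc = int(min(denoising_strength, 0.999) * steps)   # 15

# get_sigmas returns steps+1 values from sigma_max down to 0; use a
# linear stand-in schedule here just to show the indexing.
sigmas = [float(steps - i) for i in range(steps + 1)]

entry = steps - t_enc - 1          # 4: where denoising resumes
sigma_sched = sigmas[entry:]       # truncated schedule handed to the sampler
# xi = x + noise * sigmas[entry]:  the latent is noised to that entry level
print(entry, sigma_sched[0], len(sigma_sched))  # 4 16.0 17
```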
@@ -288,19 +308,7 @@ class KDiffusionSampler:
sigmas = self.model_wrap.get_sigmas(steps)
x = x * sigmas[0]
- self.model_wrap_cfg.step = 0
- self.sampler_noise_index = 0
-
- if hasattr(k_diffusion.sampling, 'trange'):
- k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(self, *args, **kwargs)
-
- if self.sampler_noises is not None:
- k_diffusion.sampling.torch = TorchHijack(self)
-
- extra_params_kwargs = {}
- for val in self.extra_params:
- if hasattr(p,val):
- extra_params_kwargs[val] = getattr(p,val)
+ extra_params_kwargs = self.initialize(p)
samples = self.func(self.model_wrap_cfg, x, sigmas, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)
diff --git a/modules/shared.py b/modules/shared.py
index 84302438..39cf89bc 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -143,6 +143,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
"use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
+ "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
}))
options_templates.update(options_section(('saving-paths', "Paths for saving"), {
@@ -180,7 +181,6 @@ options_templates.update(options_section(('face-restoration', "Face restoration"
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
- "save_selected_only": OptionInfo(False, "When using 'Save' button, only save a single selected image"),
}))
options_templates.update(options_section(('system', "System"), {
@@ -221,8 +221,9 @@ options_templates.update(options_section(('ui', "User interface"), {
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
- "ddim_eta": OptionInfo(0.0, "DDIM eta", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform','quad']}),
+ "eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+ "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+ "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
diff --git a/modules/ui.py b/modules/ui.py
index 4e24eb55..bf736b27 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -12,7 +12,7 @@ import traceback
import numpy as np
import torch
-from PIL import Image
+from PIL import Image, PngImagePlugin
import gradio as gr
import gradio.utils
@@ -98,10 +98,11 @@ def save_files(js_data, images, index):
filenames = []
data = json.loads(js_data)
-
- if index > -1 and opts.save_selected_only and (index > 0 or not opts.return_grid): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
+ if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
- data["seed"] += (index - 1 if opts.return_grid else index)
+ infotexts = [data["infotexts"][index]]
+ else:
+ infotexts = data["infotexts"]
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
@@ -117,8 +118,11 @@ def save_files(js_data, images, index):
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
- with open(filepath, "wb") as imgfile:
- imgfile.write(base64.decodebytes(filedata.encode('utf-8')))
+ pnginfo = PngImagePlugin.PngInfo()
+ pnginfo.add_text('parameters', infotexts[i])
+
+ image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
+ image.save(filepath, quality=opts.jpeg_quality, pnginfo=pnginfo)
filenames.append(filename)
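The rewritten save path decodes the base64 payload with Pillow and re-saves it so the generation parameters ride along as a PNG text chunk; a minimal standalone sketch (the image and parameter string are placeholders):

```python
import io

from PIL import Image, PngImagePlugin

# A blank image stands in for the decoded "data:image/png;base64," payload.
image = Image.new("RGB", (64, 64), "white")

pnginfo = PngImagePlugin.PngInfo()
pnginfo.add_text("parameters", "example prompt, Steps: 20, Eta: 0.9")

buf = io.BytesIO()
image.save(buf, format="PNG", pnginfo=pnginfo)

# Round-trip: the text chunk is readable back from the saved bytes.
print(Image.open(io.BytesIO(buf.getvalue())).text["parameters"])
```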
@@ -867,10 +871,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
with gr.Column(variant='panel'):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>/models</b> directory.</p>")
- modelname_0 = gr.Textbox(elem_id="modelmerger_modelname_0", label="Model Name (to)")
- modelname_1 = gr.Textbox(elem_id="modelmerger_modelname_1", label="Model Name (from)")
- interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid"], value="Weighted Sum", label="Interpolation Method")
+ with gr.Row():
+ ckpt_name_list = sorted([x.model_name for x in modules.sd_models.checkpoints_list.values()])
+ primary_model_name = gr.Dropdown(ckpt_name_list, elem_id="modelmerger_primary_model_name", label="Primary Model Name")
+ secondary_model_name = gr.Dropdown(ckpt_name_list, elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
+ interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
submit = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
@@ -879,8 +885,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
submit.click(
fn=run_modelmerger,
inputs=[
- modelname_0,
- modelname_1,
+ primary_model_name,
+ secondary_model_name,
interp_method,
interp_amount
],
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 7c01231f..24fa5a0a 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -87,12 +87,12 @@ axis_options = [
AxisOption("Prompt S/R", str, apply_prompt, format_value),
AxisOption("Sampler", str, apply_sampler, format_value),
AxisOption("Checkpoint name", str, apply_checkpoint, format_value),
- AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label),
- AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label),
- AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label),
- AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label),
- AxisOption("DDIM Eta", float, apply_field("ddim_eta"), format_value_add_label),
- AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label),# as it is now all AxisOptionImg2Img items must go after AxisOption ones
+ AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label),
+ AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label),
+ AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label),
+ AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label),
+ AxisOption("Eta", float, apply_field("eta"), format_value_add_label),
+ AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones
]