Diffstat (limited to 'extensions-builtin')
-rw-r--r--  extensions-builtin/LDSR/ldsr_model_arch.py                                        13
-rw-r--r--  extensions-builtin/LDSR/scripts/ldsr_model.py                                     29
-rw-r--r--  extensions-builtin/LDSR/sd_hijack_autoencoder.py                                  33
-rw-r--r--  extensions-builtin/LDSR/sd_hijack_ddpm_v1.py                                      70
-rw-r--r--  extensions-builtin/LDSR/vqvae_quantize.py                                        147
-rw-r--r--  extensions-builtin/Lora/extra_networks_lora.py                                    22
-rw-r--r--  extensions-builtin/Lora/lora.py                                                   90
-rw-r--r--  extensions-builtin/Lora/scripts/lora_script.py                                    38
-rw-r--r--  extensions-builtin/Lora/ui_extra_networks_lora.py                                  9
-rw-r--r--  extensions-builtin/ScuNET/scripts/scunet_model.py                                 48
-rw-r--r--  extensions-builtin/ScuNET/scunet_model_arch.py                                    11
-rw-r--r--  extensions-builtin/SwinIR/scripts/swinir_model.py                                 64
-rw-r--r--  extensions-builtin/SwinIR/swinir_model_arch.py                                     6
-rw-r--r--  extensions-builtin/SwinIR/swinir_model_arch_v2.py                                 58
-rw-r--r--  extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js                        776
-rw-r--r--  extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py                   14
-rw-r--r--  extensions-builtin/canvas-zoom-and-pan/style.css                                  63
-rw-r--r--  extensions-builtin/extra-options-section/scripts/extra_options_section.py         48
-rw-r--r--  extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js    52
19 files changed, 1368 insertions(+), 223 deletions(-)
diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py
index bc11cc6e..7f450086 100644
--- a/extensions-builtin/LDSR/ldsr_model_arch.py
+++ b/extensions-builtin/LDSR/ldsr_model_arch.py
@@ -88,7 +88,7 @@ class LDSR:
x_t = None
logs = None
- for n in range(n_runs):
+ for _ in range(n_runs):
if custom_shape is not None:
x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
@@ -110,7 +110,6 @@ class LDSR:
diffusion_steps = int(steps)
eta = 1.0
- down_sample_method = 'Lanczos'
gc.collect()
if torch.cuda.is_available():
@@ -131,11 +130,11 @@ class LDSR:
im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
else:
print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
-
+
# pad width and height to multiples of 64, padding with the edge values of the image to avoid artifacts
pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
-
+
logs = self.run(model["model"], im_padded, diffusion_steps, eta)
sample = logs["sample"]
@@ -158,7 +157,7 @@ class LDSR:
def get_cond(selected_path):
- example = dict()
+ example = {}
up_f = 4
c = selected_path.convert('RGB')
c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
@@ -196,7 +195,7 @@ def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_s
@torch.no_grad()
def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
- log = dict()
+ log = {}
z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
return_first_stage_outputs=True,
@@ -244,7 +243,7 @@ def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize
x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
log["sample_noquant"] = x_sample_noquant
log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
- except:
+ except Exception:
pass
log["sample"] = x_sample
diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py
index da19cff1..bd78dece 100644
--- a/extensions-builtin/LDSR/scripts/ldsr_model.py
+++ b/extensions-builtin/LDSR/scripts/ldsr_model.py
@@ -1,13 +1,11 @@
import os
-import sys
-import traceback
-
-from basicsr.utils.download_util import load_file_from_url
+from modules.modelloader import load_file_from_url
from modules.upscaler import Upscaler, UpscalerData
from ldsr_model_arch import LDSR
-from modules import shared, script_callbacks
-import sd_hijack_autoencoder, sd_hijack_ddpm_v1
+from modules import shared, script_callbacks, errors
+import sd_hijack_autoencoder # noqa: F401
+import sd_hijack_ddpm_v1 # noqa: F401
class UpscalerLDSR(Upscaler):
@@ -44,22 +42,17 @@ class UpscalerLDSR(Upscaler):
if local_safetensors_path is not None and os.path.exists(local_safetensors_path):
model = local_safetensors_path
else:
- model = local_ckpt_path if local_ckpt_path is not None else load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="model.ckpt", progress=True)
-
- yaml = local_yaml_path if local_yaml_path is not None else load_file_from_url(url=self.yaml_url, model_dir=self.model_path, file_name="project.yaml", progress=True)
+ model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt")
- try:
- return LDSR(model, yaml)
+ yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml")
- except Exception:
- print("Error importing LDSR:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
- return None
+ return LDSR(model, yaml)
def do_upscale(self, img, path):
- ldsr = self.load_model(path)
- if ldsr is None:
- print("NO LDSR!")
+ try:
+ ldsr = self.load_model(path)
+ except Exception:
+ errors.report(f"Failed loading LDSR model {path}", exc_info=True)
return img
ddim_steps = shared.opts.ldsr_steps
return ldsr.super_resolution(img, ddim_steps, self.scale)
diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py
index 8e03c7f8..c29d274d 100644
--- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py
+++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py
@@ -1,16 +1,21 @@
# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
# As the LDSR upscaler relies on VQModel & VQModelInterface, this hijack puts them back into ldm.models.autoencoder
-
+import numpy as np
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager
-from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
+
+from torch.optim.lr_scheduler import LambdaLR
+
+from ldm.modules.ema import LitEma
+from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.util import instantiate_from_config
import ldm.models.autoencoder
+from packaging import version
class VQModel(pl.LightningModule):
def __init__(self,
@@ -19,7 +24,7 @@ class VQModel(pl.LightningModule):
n_embed,
embed_dim,
ckpt_path=None,
- ignore_keys=[],
+ ignore_keys=None,
image_key="image",
colorize_nlabels=None,
monitor=None,
@@ -57,7 +62,7 @@ class VQModel(pl.LightningModule):
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [])
self.scheduler_config = scheduler_config
self.lr_g_factor = lr_g_factor
@@ -76,18 +81,19 @@ class VQModel(pl.LightningModule):
if context is not None:
print(f"{context}: Restored training weights")
- def init_from_ckpt(self, path, ignore_keys=list()):
+ def init_from_ckpt(self, path, ignore_keys=None):
sd = torch.load(path, map_location="cpu")["state_dict"]
keys = list(sd.keys())
for k in keys:
- for ik in ignore_keys:
+ for ik in ignore_keys or []:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
missing, unexpected = self.load_state_dict(sd, strict=False)
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
- if len(missing) > 0:
+ if missing:
print(f"Missing Keys: {missing}")
+ if unexpected:
print(f"Unexpected Keys: {unexpected}")
def on_train_batch_end(self, *args, **kwargs):
@@ -165,7 +171,7 @@ class VQModel(pl.LightningModule):
def validation_step(self, batch, batch_idx):
log_dict = self._validation_step(batch, batch_idx)
with self.ema_scope():
- log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
+ self._validation_step(batch, batch_idx, suffix="_ema")
return log_dict
def _validation_step(self, batch, batch_idx, suffix=""):
@@ -232,7 +238,7 @@ class VQModel(pl.LightningModule):
return self.decoder.conv_out.weight
def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
- log = dict()
+ log = {}
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
if only_inputs:
@@ -249,7 +255,8 @@ class VQModel(pl.LightningModule):
if plot_ema:
with self.ema_scope():
xrec_ema, _ = self(x)
- if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
+ if x.shape[1] > 3:
+ xrec_ema = self.to_rgb(xrec_ema)
log["reconstructions_ema"] = xrec_ema
return log
@@ -264,7 +271,7 @@ class VQModel(pl.LightningModule):
class VQModelInterface(VQModel):
def __init__(self, embed_dim, *args, **kwargs):
- super().__init__(embed_dim=embed_dim, *args, **kwargs)
+ super().__init__(*args, embed_dim=embed_dim, **kwargs)
self.embed_dim = embed_dim
def encode(self, x):
@@ -282,5 +289,5 @@ class VQModelInterface(VQModel):
dec = self.decoder(quant)
return dec
-setattr(ldm.models.autoencoder, "VQModel", VQModel)
-setattr(ldm.models.autoencoder, "VQModelInterface", VQModelInterface)
+ldm.models.autoencoder.VQModel = VQModel
+ldm.models.autoencoder.VQModelInterface = VQModelInterface
diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
index 5c0488e5..04adc5eb 100644
--- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
+++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
@@ -48,7 +48,7 @@ class DDPMV1(pl.LightningModule):
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
- ignore_keys=[],
+ ignore_keys=None,
load_only_unet=False,
monitor="val/loss",
use_ema=True,
@@ -100,7 +100,7 @@ class DDPMV1(pl.LightningModule):
if monitor is not None:
self.monitor = monitor
if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
@@ -182,22 +182,22 @@ class DDPMV1(pl.LightningModule):
if context is not None:
print(f"{context}: Restored training weights")
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
+ def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
sd = torch.load(path, map_location="cpu")
if "state_dict" in list(sd.keys()):
sd = sd["state_dict"]
keys = list(sd.keys())
for k in keys:
- for ik in ignore_keys:
+ for ik in ignore_keys or []:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
sd, strict=False)
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
- if len(missing) > 0:
+ if missing:
print(f"Missing Keys: {missing}")
- if len(unexpected) > 0:
+ if unexpected:
print(f"Unexpected Keys: {unexpected}")
def q_mean_variance(self, x_start, t):
@@ -375,7 +375,7 @@ class DDPMV1(pl.LightningModule):
@torch.no_grad()
def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
- log = dict()
+ log = {}
x = self.get_input(batch, self.first_stage_key)
N = min(x.shape[0], N)
n_row = min(x.shape[0], n_row)
@@ -383,7 +383,7 @@ class DDPMV1(pl.LightningModule):
log["inputs"] = x
# get diffusion row
- diffusion_row = list()
+ diffusion_row = []
x_start = x[:n_row]
for t in range(self.num_timesteps):
@@ -444,13 +444,13 @@ class LatentDiffusionV1(DDPMV1):
conditioning_key = None
ckpt_path = kwargs.pop("ckpt_path", None)
ignore_keys = kwargs.pop("ignore_keys", [])
- super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
+ super().__init__(*args, conditioning_key=conditioning_key, **kwargs)
self.concat_mode = concat_mode
self.cond_stage_trainable = cond_stage_trainable
self.cond_stage_key = cond_stage_key
try:
self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
- except:
+ except Exception:
self.num_downs = 0
if not scale_by_std:
self.scale_factor = scale_factor
@@ -460,7 +460,7 @@ class LatentDiffusionV1(DDPMV1):
self.instantiate_cond_stage(cond_stage_config)
self.cond_stage_forward = cond_stage_forward
self.clip_denoised = False
- self.bbox_tokenizer = None
+ self.bbox_tokenizer = None
self.restarted_from_ckpt = False
if ckpt_path is not None:
@@ -792,7 +792,7 @@ class LatentDiffusionV1(DDPMV1):
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
# 2. apply model loop over last dim
- if isinstance(self.first_stage_model, VQModelInterface):
+ if isinstance(self.first_stage_model, VQModelInterface):
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
force_not_quantize=predict_cids or force_not_quantize)
for i in range(z.shape[-1])]
@@ -877,16 +877,6 @@ class LatentDiffusionV1(DDPMV1):
c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
return self.p_losses(x, c, t, *args, **kwargs)
- def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
- def rescale_bbox(bbox):
- x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
- y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
- w = min(bbox[2] / crop_coordinates[2], 1 - x0)
- h = min(bbox[3] / crop_coordinates[3], 1 - y0)
- return x0, y0, w, h
-
- return [rescale_bbox(b) for b in bboxes]
-
def apply_model(self, x_noisy, t, cond, return_ids=False):
if isinstance(cond, dict):
@@ -900,7 +890,7 @@ class LatentDiffusionV1(DDPMV1):
if hasattr(self, "split_input_params"):
assert len(cond) == 1 # todo can only deal with one conditioning atm
- assert not return_ids
+ assert not return_ids
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
@@ -1126,7 +1116,7 @@ class LatentDiffusionV1(DDPMV1):
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+ [x[:batch_size] for x in cond[key]] for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
@@ -1157,8 +1147,10 @@ class LatentDiffusionV1(DDPMV1):
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(x0_partial)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
+ if callback:
+ callback(i)
+ if img_callback:
+ img_callback(img, i)
return img, intermediates
@torch.no_grad()
@@ -1205,8 +1197,10 @@ class LatentDiffusionV1(DDPMV1):
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(img)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
+ if callback:
+ callback(i)
+ if img_callback:
+ img_callback(img, i)
if return_intermediates:
return img, intermediates
@@ -1221,7 +1215,7 @@ class LatentDiffusionV1(DDPMV1):
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+ [x[:batch_size] for x in cond[key]] for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
return self.p_sample_loop(cond,
@@ -1253,7 +1247,7 @@ class LatentDiffusionV1(DDPMV1):
use_ddim = ddim_steps is not None
- log = dict()
+ log = {}
z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
return_first_stage_outputs=True,
force_c_encode=True,
@@ -1280,7 +1274,7 @@ class LatentDiffusionV1(DDPMV1):
if plot_diffusion_rows:
# get diffusion row
- diffusion_row = list()
+ diffusion_row = []
z_start = z[:n_row]
for t in range(self.num_timesteps):
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
@@ -1322,7 +1316,7 @@ class LatentDiffusionV1(DDPMV1):
if inpaint:
# make a simple center square
- b, h, w = z.shape[0], z.shape[2], z.shape[3]
+ h, w = z.shape[2], z.shape[3]
mask = torch.ones(N, h, w).to(self.device)
# zeros will be filled in
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
@@ -1424,10 +1418,10 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1):
# TODO: move all layout-specific hacks to this class
def __init__(self, cond_stage_key, *args, **kwargs):
assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
- super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)
+ super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)
def log_images(self, batch, N=8, *args, **kwargs):
- logs = super().log_images(batch=batch, N=N, *args, **kwargs)
+ logs = super().log_images(*args, batch=batch, N=N, **kwargs)
key = 'train' if self.training else 'validation'
dset = self.trainer.datamodule.datasets[key]
@@ -1443,7 +1437,7 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1):
logs['bbox_image'] = cond_img
return logs
-setattr(ldm.models.diffusion.ddpm, "DDPMV1", DDPMV1)
-setattr(ldm.models.diffusion.ddpm, "LatentDiffusionV1", LatentDiffusionV1)
-setattr(ldm.models.diffusion.ddpm, "DiffusionWrapperV1", DiffusionWrapperV1)
-setattr(ldm.models.diffusion.ddpm, "Layout2ImgDiffusionV1", Layout2ImgDiffusionV1)
+ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
+ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
+ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
+ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
diff --git a/extensions-builtin/LDSR/vqvae_quantize.py b/extensions-builtin/LDSR/vqvae_quantize.py
new file mode 100644
index 00000000..dd14b8fd
--- /dev/null
+++ b/extensions-builtin/LDSR/vqvae_quantize.py
@@ -0,0 +1,147 @@
+# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
+# where the license is as follows:
+#
+# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+# OR OTHER DEALINGS IN THE SOFTWARE.
+
+import torch
+import torch.nn as nn
+import numpy as np
+from einops import rearrange
+
+
+class VectorQuantizer2(nn.Module):
+ """
+ Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
+ avoids costly matrix multiplications and allows for post-hoc remapping of indices.
+ """
+
+ # NOTE: due to a bug the beta term was applied to the wrong term. for
+ # backwards compatibility we use the buggy version by default, but you can
+ # specify legacy=False to fix it.
+ def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
+ sane_index_shape=False, legacy=True):
+ super().__init__()
+ self.n_e = n_e
+ self.e_dim = e_dim
+ self.beta = beta
+ self.legacy = legacy
+
+ self.embedding = nn.Embedding(self.n_e, self.e_dim)
+ self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
+
+ self.remap = remap
+ if self.remap is not None:
+ self.register_buffer("used", torch.tensor(np.load(self.remap)))
+ self.re_embed = self.used.shape[0]
+ self.unknown_index = unknown_index # "random" or "extra" or integer
+ if self.unknown_index == "extra":
+ self.unknown_index = self.re_embed
+ self.re_embed = self.re_embed + 1
+ print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
+ f"Using {self.unknown_index} for unknown indices.")
+ else:
+ self.re_embed = n_e
+
+ self.sane_index_shape = sane_index_shape
+
+ def remap_to_used(self, inds):
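+ # map indices in the full codebook [0, n_e) to positions in the remapped subset self.used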
+ ishape = inds.shape
+ assert len(ishape) > 1
+ inds = inds.reshape(ishape[0], -1)
+ used = self.used.to(inds)
+ match = (inds[:, :, None] == used[None, None, ...]).long()
+ new = match.argmax(-1)
+ unknown = match.sum(2) < 1
+ if self.unknown_index == "random":
+ new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
+ else:
+ new[unknown] = self.unknown_index
+ return new.reshape(ishape)
+
+ def unmap_to_all(self, inds):
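+ # inverse of remap_to_used: recover full-codebook indices from remapped positions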
+ ishape = inds.shape
+ assert len(ishape) > 1
+ inds = inds.reshape(ishape[0], -1)
+ used = self.used.to(inds)
+ if self.re_embed > self.used.shape[0]: # extra token
+ inds[inds >= self.used.shape[0]] = 0 # simply set to zero
+ back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
+ return back.reshape(ishape)
+
+ def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
+ assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
+ assert rescale_logits is False, "Only for interface compatible with Gumbel"
+ assert return_logits is False, "Only for interface compatible with Gumbel"
+ # reshape z -> (batch, height, width, channel) and flatten
+ z = rearrange(z, 'b c h w -> b h w c').contiguous()
+ z_flattened = z.view(-1, self.e_dim)
+ # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
+
+ d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
+ torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
+ torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
+
+ min_encoding_indices = torch.argmin(d, dim=1)
+ z_q = self.embedding(min_encoding_indices).view(z.shape)
+ perplexity = None
+ min_encodings = None
+
+ # compute loss for embedding
+ if not self.legacy:
+ loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \
+ torch.mean((z_q - z.detach()) ** 2)
+ else:
+ loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
+ torch.mean((z_q - z.detach()) ** 2)
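+ # non-legacy applies beta to the commitment term ||z - sg[z_q]||^2, as in the VQ-VAE paper; legacy (default) weights the codebook term instead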
+
+ # preserve gradients
+ z_q = z + (z_q - z).detach()
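+ # (straight-through estimator: the decoder sees z_q, but gradients flow back to the encoder through z)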
+
+ # reshape back to match original input shape
+ z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
+
+ if self.remap is not None:
+ min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis
+ min_encoding_indices = self.remap_to_used(min_encoding_indices)
+ min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten
+
+ if self.sane_index_shape:
+ min_encoding_indices = min_encoding_indices.reshape(
+ z_q.shape[0], z_q.shape[2], z_q.shape[3])
+
+ return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
+
+ def get_codebook_entry(self, indices, shape):
+ # shape specifying (batch, height, width, channel)
+ if self.remap is not None:
+ indices = indices.reshape(shape[0], -1) # add batch axis
+ indices = self.unmap_to_all(indices)
+ indices = indices.reshape(-1) # flatten again
+
+ # get quantized latent vectors
+ z_q = self.embedding(indices)
+
+ if shape is not None:
+ z_q = z_q.view(shape)
+ # reshape back to match original input shape
+ z_q = z_q.permute(0, 3, 1, 2).contiguous()
+
+ return z_q
diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py
index ccb249ac..66ee9c85 100644
--- a/extensions-builtin/Lora/extra_networks_lora.py
+++ b/extensions-builtin/Lora/extra_networks_lora.py
@@ -9,19 +9,37 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
def activate(self, p, params_list):
additional = shared.opts.sd_lora
- if additional != "None" and additional in lora.available_loras and len([x for x in params_list if x.items[0] == additional]) == 0:
+ if additional != "None" and additional in lora.available_loras and not any(x for x in params_list if x.items[0] == additional):
p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
names = []
multipliers = []
for params in params_list:
- assert len(params.items) > 0
+ assert params.items
names.append(params.items[0])
multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)
lora.load_loras(names, multipliers)
+ if shared.opts.lora_add_hashes_to_infotext:
+ lora_hashes = []
+ for item in lora.loaded_loras:
+ shorthash = item.lora_on_disk.shorthash
+ if not shorthash:
+ continue
+
+ alias = item.mentioned_name
+ if not alias:
+ continue
+
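+ # strip ":" and "," so the alias can't break the "alias: hash, alias: hash" infotext format built below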
+ alias = alias.replace(":", "").replace(",", "")
+
+ lora_hashes.append(f"{alias}: {shorthash}")
+
+ if lora_hashes:
+ p.extra_generation_params["Lora hashes"] = ", ".join(lora_hashes)
+
def deactivate(self, p):
pass
diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py
index b5d0c98f..cd46e6c7 100644
--- a/extensions-builtin/Lora/lora.py
+++ b/extensions-builtin/Lora/lora.py
@@ -1,10 +1,9 @@
-import glob
import os
import re
import torch
from typing import Union
-from modules import shared, devices, sd_models, errors, scripts
+from modules import shared, devices, sd_models, errors, scripts, sd_hijack, hashes
metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}
@@ -77,9 +76,9 @@ class LoraOnDisk:
self.name = name
self.filename = filename
self.metadata = {}
+ self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"
- _, ext = os.path.splitext(filename)
- if ext.lower() == ".safetensors":
+ if self.is_safetensors:
try:
self.metadata = sd_models.read_metadata_from_safetensors(filename)
except Exception as e:
@@ -95,14 +94,43 @@ class LoraOnDisk:
self.ssmd_cover_images = self.metadata.pop('ssmd_cover_images', None) # those are cover images and they are too big to display in UI as text
self.alias = self.metadata.get('ss_output_name', self.name)
+ self.hash = None
+ self.shorthash = None
+ self.set_hash(
+ self.metadata.get('sshs_model_hash') or
+ hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or
+ ''
+ )
+
+ def set_hash(self, v):
+ self.hash = v
+ self.shorthash = self.hash[0:12]
+
+ if self.shorthash:
+ available_lora_hash_lookup[self.shorthash] = self
+
+ def read_hash(self):
+ if not self.hash:
+ self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '')
+
+ def get_alias(self):
+ if shared.opts.lora_preferred_name == "Filename" or self.alias.lower() in forbidden_lora_aliases:
+ return self.name
+ else:
+ return self.alias
+
class LoraModule:
- def __init__(self, name):
+ def __init__(self, name, lora_on_disk: LoraOnDisk):
self.name = name
+ self.lora_on_disk = lora_on_disk
self.multiplier = 1.0
self.modules = {}
self.mtime = None
+ self.mentioned_name = None
+ """the text that was used to add lora to prompt - can be either name or an alias"""
+
class LoraUpDownModule:
def __init__(self):
@@ -127,11 +155,11 @@ def assign_lora_names_to_compvis_modules(sd_model):
sd_model.lora_layer_mapping = lora_layer_mapping
-def load_lora(name, filename):
- lora = LoraModule(name)
- lora.mtime = os.path.getmtime(filename)
+def load_lora(name, lora_on_disk):
+ lora = LoraModule(name, lora_on_disk)
+ lora.mtime = os.path.getmtime(lora_on_disk.filename)
- sd = sd_models.read_state_dict(filename)
+ sd = sd_models.read_state_dict(lora_on_disk.filename)
# this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0
if not hasattr(shared.sd_model, 'lora_layer_mapping'):
@@ -177,7 +205,7 @@ def load_lora(name, filename):
else:
print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
continue
- assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}'
+ raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}")
with torch.no_grad():
module.weight.copy_(weight)
@@ -189,10 +217,10 @@ def load_lora(name, filename):
elif lora_key == "lora_down.weight":
lora_module.down = module
else:
- assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha'
+ raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha")
- if len(keys_failed_to_match) > 0:
- print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}")
+ if keys_failed_to_match:
+ print(f"Failed to match keys when loading Lora {lora_on_disk.filename}: {keys_failed_to_match}")
return lora
@@ -207,30 +235,41 @@ def load_loras(names, multipliers=None):
loaded_loras.clear()
loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
- if any([x is None for x in loras_on_disk]):
+ if any(x is None for x in loras_on_disk):
list_available_loras()
loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
+ failed_to_load_loras = []
+
for i, name in enumerate(names):
lora = already_loaded.get(name, None)
lora_on_disk = loras_on_disk[i]
+
if lora_on_disk is not None:
if lora is None or os.path.getmtime(lora_on_disk.filename) > lora.mtime:
try:
- lora = load_lora(name, lora_on_disk.filename)
+ lora = load_lora(name, lora_on_disk)
except Exception as e:
errors.display(e, f"loading Lora {lora_on_disk.filename}")
continue
+ lora.mentioned_name = name
+
+ lora_on_disk.read_hash()
+
if lora is None:
+ failed_to_load_loras.append(name)
print(f"Couldn't find Lora with name {name}")
continue
lora.multiplier = multipliers[i] if multipliers else 1.0
loaded_loras.append(lora)
+ if failed_to_load_loras:
+ sd_hijack.model_hijack.comments.append("Failed to find Loras: " + ", ".join(failed_to_load_loras))
+
def lora_calc_updown(lora, module, target):
with torch.no_grad():
@@ -314,7 +353,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu
print(f'failed to calculate lora weights for layer {lora_layer_name}')
- setattr(self, "lora_current_names", wanted_names)
+ self.lora_current_names = wanted_names
def lora_forward(module, input, original_forward):
@@ -348,8 +387,8 @@ def lora_forward(module, input, original_forward):
def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
- setattr(self, "lora_current_names", ())
- setattr(self, "lora_weights_backup", None)
+ self.lora_current_names = ()
+ self.lora_weights_backup = None
def lora_Linear_forward(self, input):
@@ -398,17 +437,22 @@ def list_available_loras():
available_loras.clear()
available_lora_aliases.clear()
forbidden_lora_aliases.clear()
- forbidden_lora_aliases.update({"none": 1})
+ available_lora_hash_lookup.clear()
+ forbidden_lora_aliases.update({"none": 1, "Addams": 1})
os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
- for filename in sorted(candidates, key=str.lower):
+ for filename in candidates:
if os.path.isdir(filename):
continue
name = os.path.splitext(os.path.basename(filename))[0]
- entry = LoraOnDisk(name, filename)
+ try:
+ entry = LoraOnDisk(name, filename)
+ except OSError: # should catch FileNotFoundError and PermissionError etc.
+ errors.report(f"Failed to load LoRA {name} from {filename}", exc_info=True)
+ continue
available_loras[name] = entry
@@ -428,7 +472,7 @@ def infotext_pasted(infotext, params):
added = []
- for k, v in params.items():
+ for k in params:
if not k.startswith("AddNet Model "):
continue
@@ -452,8 +496,10 @@ def infotext_pasted(infotext, params):
if added:
params["Prompt"] += "\n" + "".join(added)
+
available_loras = {}
available_lora_aliases = {}
+available_lora_hash_lookup = {}
forbidden_lora_aliases = {}
loaded_loras = []
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index 060bda05..e650f469 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -1,3 +1,5 @@
+import re
+
import torch
import gradio as gr
from fastapi import FastAPI
@@ -53,8 +55,9 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted)
shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
- "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
- "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
+ "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras),
+ "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
+ "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
}))
@@ -77,6 +80,37 @@ def api_loras(_: gr.Blocks, app: FastAPI):
async def get_loras():
return [create_lora_json(obj) for obj in lora.available_loras.values()]
+ @app.post("/sdapi/v1/refresh-loras")
+ async def refresh_loras():
+ return lora.list_available_loras()
+
script_callbacks.on_app_started(api_loras)
+re_lora = re.compile("<lora:([^:]+):")
+
+
+def infotext_pasted(infotext, d):
+ hashes = d.get("Lora hashes")
+ if not hashes:
+ return
+
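+ # parse the "Lora hashes" infotext entry, written as "alias1: shorthash1, alias2: shorthash2", into an alias -> shorthash dict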
+ hashes = [x.strip().split(':', 1) for x in hashes.split(",")]
+ hashes = {x[0].strip().replace(",", ""): x[1].strip() for x in hashes}
+
+ def lora_replacement(m):
+ alias = m.group(1)
+ shorthash = hashes.get(alias)
+ if shorthash is None:
+ return m.group(0)
+
+ lora_on_disk = lora.available_lora_hash_lookup.get(shorthash)
+ if lora_on_disk is None:
+ return m.group(0)
+
+ return f'<lora:{lora_on_disk.get_alias()}:'
+
+ d["Prompt"] = re.sub(re_lora, lora_replacement, d["Prompt"])
+
+
+script_callbacks.on_infotext_pasted(infotext_pasted)
diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py
index 2050e3fa..da49790b 100644
--- a/extensions-builtin/Lora/ui_extra_networks_lora.py
+++ b/extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -13,13 +13,10 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
lora.list_available_loras()
def list_items(self):
- for name, lora_on_disk in lora.available_loras.items():
+ for index, (name, lora_on_disk) in enumerate(lora.available_loras.items()):
path, ext = os.path.splitext(lora_on_disk.filename)
- if shared.opts.lora_preferred_name == "Filename" or lora_on_disk.alias.lower() in lora.forbidden_lora_aliases:
- alias = name
- else:
- alias = lora_on_disk.alias
+ alias = lora_on_disk.get_alias()
yield {
"name": name,
@@ -30,6 +27,8 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
"prompt": json.dumps(f"<lora:{alias}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
"local_preview": f"{path}.{shared.opts.samples_format}",
"metadata": json.dumps(lora_on_disk.metadata, indent=4) if lora_on_disk.metadata else None,
+ "sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
+
}
def allowed_directories_for_previews(self):
diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py
index c7fd5739..ffef26b2 100644
--- a/extensions-builtin/ScuNET/scripts/scunet_model.py
+++ b/extensions-builtin/ScuNET/scripts/scunet_model.py
@@ -1,19 +1,16 @@
-import os.path
import sys
-import traceback
import PIL.Image
import numpy as np
import torch
from tqdm import tqdm
-from basicsr.utils.download_util import load_file_from_url
-
import modules.upscaler
-from modules import devices, modelloader
-from scunet_model_arch import SCUNet as net
+from modules import devices, modelloader, script_callbacks, errors
+from scunet_model_arch import SCUNet
+
+from modules.modelloader import load_file_from_url
from modules.shared import opts
-from modules import images
class UpscalerScuNET(modules.upscaler.Upscaler):
@@ -29,7 +26,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
scalers = []
add_model2 = True
for file in model_paths:
- if "http" in file:
+ if file.startswith("http"):
name = self.model_name
else:
name = modelloader.friendly_name(file)
@@ -39,8 +36,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
scalers.append(scaler_data)
except Exception:
- print(f"Error loading ScuNET model: {file}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ errors.report(f"Error loading ScuNET model: {file}", exc_info=True)
if add_model2:
scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
scalers.append(scaler_data2)
@@ -91,9 +87,10 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
torch.cuda.empty_cache()
- model = self.load_model(selected_file)
- if model is None:
- print(f"ScuNET: Unable to load model from {selected_file}", file=sys.stderr)
+ try:
+ model = self.load_model(selected_file)
+ except Exception as e:
+ print(f"ScuNET: Unable to load model from {selected_file}: {e}", file=sys.stderr)
return img
device = devices.get_device_for('scunet')
@@ -121,20 +118,27 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
def load_model(self, path: str):
device = devices.get_device_for('scunet')
- if "http" in path:
- filename = load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="%s.pth" % self.name,
- progress=True)
+ if path.startswith("http"):
+ # TODO: this doesn't use `path` at all?
+ filename = load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth")
else:
filename = path
- if not os.path.exists(os.path.join(self.model_path, filename)) or filename is None:
- print(f"ScuNET: Unable to load model from {filename}", file=sys.stderr)
- return None
-
- model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
+ model = SCUNet(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
model.load_state_dict(torch.load(filename), strict=True)
model.eval()
- for k, v in model.named_parameters():
+ for _, v in model.named_parameters():
v.requires_grad = False
model = model.to(device)
return model
+
+
+def on_ui_settings():
+ import gradio as gr
+ from modules import shared
+
+ shared.opts.add_option("SCUNET_tile", shared.OptionInfo(256, "Tile size for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")).info("0 = no tiling"))
+ shared.opts.add_option("SCUNET_tile_overlap", shared.OptionInfo(8, "Tile overlap for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, section=('upscaling', "Upscaling")).info("Low values = visible seam"))
+
+
+script_callbacks.on_ui_settings(on_ui_settings)
diff --git a/extensions-builtin/ScuNET/scunet_model_arch.py b/extensions-builtin/ScuNET/scunet_model_arch.py
index 43ca8d36..b51a8806 100644
--- a/extensions-builtin/ScuNET/scunet_model_arch.py
+++ b/extensions-builtin/ScuNET/scunet_model_arch.py
@@ -61,7 +61,9 @@ class WMSA(nn.Module):
Returns:
output: tensor shape [b h w c]
"""
- if self.type != 'W': x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
+ if self.type != 'W':
+ x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
+
x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
h_windows = x.size(1)
w_windows = x.size(2)
@@ -85,8 +87,9 @@ class WMSA(nn.Module):
output = self.linear(output)
output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size)
- if self.type != 'W': output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2),
- dims=(1, 2))
+ if self.type != 'W':
+ output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2))
+
return output
def relative_embedding(self):
@@ -262,4 +265,4 @@ class SCUNet(nn.Module):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
\ No newline at end of file
+ nn.init.constant_(m.weight, 1.0)
diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py
index e8783bca..c6bc53a8 100644
--- a/extensions-builtin/SwinIR/scripts/swinir_model.py
+++ b/extensions-builtin/SwinIR/scripts/swinir_model.py
@@ -1,18 +1,17 @@
-import contextlib
-import os
+import sys
import numpy as np
import torch
from PIL import Image
-from basicsr.utils.download_util import load_file_from_url
from tqdm import tqdm
from modules import modelloader, devices, script_callbacks, shared
-from modules.shared import cmd_opts, opts, state
-from swinir_model_arch import SwinIR as net
-from swinir_model_arch_v2 import Swin2SR as net2
+from modules.shared import opts, state
+from swinir_model_arch import SwinIR
+from swinir_model_arch_v2 import Swin2SR
from modules.upscaler import Upscaler, UpscalerData
+SWINIR_MODEL_URL = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth"
device_swinir = devices.get_device_for('swinir')
@@ -20,16 +19,14 @@ device_swinir = devices.get_device_for('swinir')
class UpscalerSwinIR(Upscaler):
def __init__(self, dirname):
self.name = "SwinIR"
- self.model_url = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0" \
- "/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR" \
- "-L_x4_GAN.pth "
+ self.model_url = SWINIR_MODEL_URL
self.model_name = "SwinIR 4x"
self.user_path = dirname
super().__init__()
scalers = []
model_files = self.find_models(ext_filter=[".pt", ".pth"])
for model in model_files:
- if "http" in model:
+ if model.startswith("http"):
name = self.model_name
else:
name = modelloader.friendly_name(model)
@@ -38,42 +35,45 @@ class UpscalerSwinIR(Upscaler):
self.scalers = scalers
def do_upscale(self, img, model_file):
- model = self.load_model(model_file)
- if model is None:
+ try:
+ model = self.load_model(model_file)
+ except Exception as e:
+ print(f"Failed loading SwinIR model {model_file}: {e}", file=sys.stderr)
return img
model = model.to(device_swinir, dtype=devices.dtype)
img = upscale(img, model)
try:
torch.cuda.empty_cache()
- except:
+ except Exception:
pass
return img
def load_model(self, path, scale=4):
- if "http" in path:
- dl_name = "%s%s" % (self.model_name.replace(" ", "_"), ".pth")
- filename = load_file_from_url(url=path, model_dir=self.model_path, file_name=dl_name, progress=True)
+ if path.startswith("http"):
+ filename = modelloader.load_file_from_url(
+ url=path,
+ model_dir=self.model_download_path,
+ file_name=f"{self.model_name.replace(' ', '_')}.pth",
+ )
else:
filename = path
- if filename is None or not os.path.exists(filename):
- return None
if filename.endswith(".v2.pth"):
- model = net2(
- upscale=scale,
- in_chans=3,
- img_size=64,
- window_size=8,
- img_range=1.0,
- depths=[6, 6, 6, 6, 6, 6],
- embed_dim=180,
- num_heads=[6, 6, 6, 6, 6, 6],
- mlp_ratio=2,
- upsampler="nearest+conv",
- resi_connection="1conv",
+ model = Swin2SR(
+ upscale=scale,
+ in_chans=3,
+ img_size=64,
+ window_size=8,
+ img_range=1.0,
+ depths=[6, 6, 6, 6, 6, 6],
+ embed_dim=180,
+ num_heads=[6, 6, 6, 6, 6, 6],
+ mlp_ratio=2,
+ upsampler="nearest+conv",
+ resi_connection="1conv",
)
params = None
else:
- model = net(
+ model = SwinIR(
upscale=scale,
in_chans=3,
img_size=64,
@@ -151,7 +151,7 @@ def inference(img, model, tile, tile_overlap, window_size, scale):
for w_idx in w_idx_list:
if state.interrupted or state.skipped:
break
-
+
in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
out_patch = model(in_patch)
out_patch_mask = torch.ones_like(out_patch)
diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py
index 863f42db..93b93274 100644
--- a/extensions-builtin/SwinIR/swinir_model_arch.py
+++ b/extensions-builtin/SwinIR/swinir_model_arch.py
@@ -644,7 +644,7 @@ class SwinIR(nn.Module):
"""
def __init__(self, img_size=64, patch_size=1, in_chans=3,
- embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
+ embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
@@ -805,7 +805,7 @@ class SwinIR(nn.Module):
def forward(self, x):
H, W = x.shape[2:]
x = self.check_image_size(x)
-
+
self.mean = self.mean.type_as(x)
x = (x - self.mean) * self.img_range
@@ -844,7 +844,7 @@ class SwinIR(nn.Module):
H, W = self.patches_resolution
flops += H * W * 3 * self.embed_dim * 9
flops += self.patch_embed.flops()
- for i, layer in enumerate(self.layers):
+ for layer in self.layers:
flops += layer.flops()
flops += H * W * 3 * self.embed_dim * self.embed_dim
flops += self.upsample.flops()
diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py
index 0e28ae6e..dad22cca 100644
--- a/extensions-builtin/SwinIR/swinir_model_arch_v2.py
+++ b/extensions-builtin/SwinIR/swinir_model_arch_v2.py
@@ -74,7 +74,7 @@ class WindowAttention(nn.Module):
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.,
- pretrained_window_size=[0, 0]):
+ pretrained_window_size=(0, 0)):
super().__init__()
self.dim = dim
@@ -241,7 +241,7 @@ class SwinTransformerBlock(nn.Module):
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
-
+
def calculate_mask(self, x_size):
# calculate attention mask for SW-MSA
H, W = x_size
@@ -263,7 +263,7 @@ class SwinTransformerBlock(nn.Module):
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
- return attn_mask
+ return attn_mask
def forward(self, x, x_size):
H, W = x_size
@@ -288,7 +288,7 @@ class SwinTransformerBlock(nn.Module):
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
else:
attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
-
+
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
@@ -369,7 +369,7 @@ class PatchMerging(nn.Module):
H, W = self.input_resolution
flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
flops += H * W * self.dim // 2
- return flops
+ return flops
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
@@ -447,7 +447,7 @@ class BasicLayer(nn.Module):
nn.init.constant_(blk.norm1.weight, 0)
nn.init.constant_(blk.norm2.bias, 0)
nn.init.constant_(blk.norm2.weight, 0)
-
+
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
@@ -492,7 +492,7 @@ class PatchEmbed(nn.Module):
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
- return flops
+ return flops
class RSTB(nn.Module):
"""Residual Swin Transformer Block (RSTB).
@@ -531,7 +531,7 @@ class RSTB(nn.Module):
num_heads=num_heads,
window_size=window_size,
mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
+ qkv_bias=qkv_bias,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path,
norm_layer=norm_layer,
@@ -622,7 +622,7 @@ class Upsample(nn.Sequential):
else:
raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
super(Upsample, self).__init__(*m)
-
+
class Upsample_hf(nn.Sequential):
"""Upsample module.
@@ -642,7 +642,7 @@ class Upsample_hf(nn.Sequential):
m.append(nn.PixelShuffle(3))
else:
raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
- super(Upsample_hf, self).__init__(*m)
+ super(Upsample_hf, self).__init__(*m)
class UpsampleOneStep(nn.Sequential):
@@ -667,8 +667,8 @@ class UpsampleOneStep(nn.Sequential):
H, W = self.input_resolution
flops = H * W * self.num_feat * 3 * 9
return flops
-
-
+
+
class Swin2SR(nn.Module):
r""" Swin2SR
@@ -698,8 +698,8 @@ class Swin2SR(nn.Module):
"""
def __init__(self, img_size=64, patch_size=1, in_chans=3,
- embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
- window_size=7, mlp_ratio=4., qkv_bias=True,
+ embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
+ window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
@@ -764,7 +764,7 @@ class Swin2SR(nn.Module):
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
- qkv_bias=qkv_bias,
+ qkv_bias=qkv_bias,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
norm_layer=norm_layer,
@@ -776,7 +776,7 @@ class Swin2SR(nn.Module):
)
self.layers.append(layer)
-
+
if self.upsampler == 'pixelshuffle_hf':
self.layers_hf = nn.ModuleList()
for i_layer in range(self.num_layers):
@@ -787,7 +787,7 @@ class Swin2SR(nn.Module):
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
- qkv_bias=qkv_bias,
+ qkv_bias=qkv_bias,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
norm_layer=norm_layer,
@@ -799,7 +799,7 @@ class Swin2SR(nn.Module):
)
self.layers_hf.append(layer)
-
+
self.norm = norm_layer(self.num_features)
# build the last conv layer in deep feature extraction
@@ -829,10 +829,10 @@ class Swin2SR(nn.Module):
self.conv_aux = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
self.conv_after_aux = nn.Sequential(
nn.Conv2d(3, num_feat, 3, 1, 1),
- nn.LeakyReLU(inplace=True))
+ nn.LeakyReLU(inplace=True))
self.upsample = Upsample(upscale, num_feat)
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
-
+
elif self.upsampler == 'pixelshuffle_hf':
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
nn.LeakyReLU(inplace=True))
@@ -846,7 +846,7 @@ class Swin2SR(nn.Module):
nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
nn.LeakyReLU(inplace=True))
self.conv_last_hf = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
-
+
elif self.upsampler == 'pixelshuffledirect':
# for lightweight SR (to save parameters)
self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
@@ -905,7 +905,7 @@ class Swin2SR(nn.Module):
x = self.patch_unembed(x, x_size)
return x
-
+
def forward_features_hf(self, x):
x_size = (x.shape[2], x.shape[3])
x = self.patch_embed(x)
@@ -919,7 +919,7 @@ class Swin2SR(nn.Module):
x = self.norm(x) # B L C
x = self.patch_unembed(x, x_size)
- return x
+ return x
def forward(self, x):
H, W = x.shape[2:]
@@ -951,7 +951,7 @@ class Swin2SR(nn.Module):
x = self.conv_after_body(self.forward_features(x)) + x
x_before = self.conv_before_upsample(x)
x_out = self.conv_last(self.upsample(x_before))
-
+
x_hf = self.conv_first_hf(x_before)
x_hf = self.conv_after_body_hf(self.forward_features_hf(x_hf)) + x_hf
x_hf = self.conv_before_upsample_hf(x_hf)
@@ -977,15 +977,15 @@ class Swin2SR(nn.Module):
x_first = self.conv_first(x)
res = self.conv_after_body(self.forward_features(x_first)) + x_first
x = x + self.conv_last(res)
-
+
x = x / self.img_range + self.mean
if self.upsampler == "pixelshuffle_aux":
return x[:, :, :H*self.upscale, :W*self.upscale], aux
-
+
elif self.upsampler == "pixelshuffle_hf":
x_out = x_out / self.img_range + self.mean
return x_out[:, :, :H*self.upscale, :W*self.upscale], x[:, :, :H*self.upscale, :W*self.upscale], x_hf[:, :, :H*self.upscale, :W*self.upscale]
-
+
else:
return x[:, :, :H*self.upscale, :W*self.upscale]
@@ -994,7 +994,7 @@ class Swin2SR(nn.Module):
H, W = self.patches_resolution
flops += H * W * 3 * self.embed_dim * 9
flops += self.patch_embed.flops()
- for i, layer in enumerate(self.layers):
+ for layer in self.layers:
flops += layer.flops()
flops += H * W * 3 * self.embed_dim * self.embed_dim
flops += self.upsample.flops()
@@ -1014,4 +1014,4 @@ if __name__ == '__main__':
x = torch.randn((1, 3, height, width))
x = model(x)
- print(x.shape)
\ No newline at end of file
+ print(x.shape)
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
new file mode 100644
index 00000000..30199dcd
--- /dev/null
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -0,0 +1,776 @@
+onUiLoaded(async() => {
+ const elementIDs = {
+ img2imgTabs: "#mode_img2img .tab-nav",
+ inpaint: "#img2maskimg",
+ inpaintSketch: "#inpaint_sketch",
+ rangeGroup: "#img2img_column_size",
+ sketch: "#img2img_sketch"
+ };
+ const tabNameToElementId = {
+ "Inpaint sketch": elementIDs.inpaintSketch,
+ "Inpaint": elementIDs.inpaint,
+ "Sketch": elementIDs.sketch
+ };
+
+ // Helper functions
+ // Get active tab
+ function getActiveTab(elements, all = false) {
+ const tabs = elements.img2imgTabs.querySelectorAll("button");
+
+ if (all) return tabs;
+
+ for (let tab of tabs) {
+ if (tab.classList.contains("selected")) {
+ return tab;
+ }
+ }
+ }
+
+ // Get tab ID
+ function getTabId(elements) {
+ const activeTab = getActiveTab(elements);
+ return tabNameToElementId[activeTab.innerText];
+ }
+
+ // Wait until opts loaded
+ async function waitForOpts() {
+ for (;;) {
+ if (window.opts && Object.keys(window.opts).length) {
+ return window.opts;
+ }
+ await new Promise(resolve => setTimeout(resolve, 100));
+ }
+ }
+
+ // Function for defining the "Ctrl", "Shift" and "Alt" keys
+ function isModifierKey(event, key) {
+ switch (key) {
+ case "Ctrl":
+ return event.ctrlKey;
+ case "Shift":
+ return event.shiftKey;
+ case "Alt":
+ return event.altKey;
+ default:
+ return false;
+ }
+ }
+
+ // Check if hotkey is valid
+ function isValidHotkey(value) {
+ const specialKeys = ["Ctrl", "Alt", "Shift", "Disable"];
+ return (
+ (typeof value === "string" &&
+ value.length === 1 &&
+ /[a-z]/i.test(value)) ||
+ specialKeys.includes(value)
+ );
+ }
+
+ // Normalize hotkey
+ function normalizeHotkey(hotkey) {
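+ // single characters become KeyboardEvent.code values ("z" -> "KeyZ"); named keys like "Ctrl" pass through unchanged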
+ return hotkey.length === 1 ? "Key" + hotkey.toUpperCase() : hotkey;
+ }
+
+ // Format hotkey for display
+ function formatHotkeyForDisplay(hotkey) {
+ return hotkey.startsWith("Key") ? hotkey.slice(3) : hotkey;
+ }
+
+ // Create hotkey configuration with the provided options
+ function createHotkeyConfig(defaultHotkeysConfig, hotkeysConfigOpts) {
+ const result = {}; // Resulting hotkey configuration
+ const usedKeys = new Set(); // Set of used hotkeys
+
+ // Iterate through defaultHotkeysConfig keys
+ for (const key in defaultHotkeysConfig) {
+ const userValue = hotkeysConfigOpts[key]; // User-provided hotkey value
+ const defaultValue = defaultHotkeysConfig[key]; // Default hotkey value
+
+ // Apply appropriate value for undefined, boolean, or object userValue
+ if (
+ userValue === undefined ||
+ typeof userValue === "boolean" ||
+ typeof userValue === "object" ||
+ userValue === "disable"
+ ) {
+ result[key] =
+ userValue === undefined ? defaultValue : userValue;
+ } else if (isValidHotkey(userValue)) {
+ const normalizedUserValue = normalizeHotkey(userValue);
+
+ // Check for conflicting hotkeys
+ if (!usedKeys.has(normalizedUserValue)) {
+ usedKeys.add(normalizedUserValue);
+ result[key] = normalizedUserValue;
+ } else {
+ console.error(
+ `Hotkey: ${formatHotkeyForDisplay(
+ userValue
+ )} for ${key} is repeated and conflicts with another hotkey. The default hotkey is used: ${formatHotkeyForDisplay(
+ defaultValue
+ )}`
+ );
+ result[key] = defaultValue;
+ }
+ } else {
+ console.error(
+ `Hotkey: ${formatHotkeyForDisplay(
+ userValue
+ )} for ${key} is not valid. The default hotkey is used: ${formatHotkeyForDisplay(
+ defaultValue
+ )}`
+ );
+ result[key] = defaultValue;
+ }
+ }
+
+ return result;
+ }
+
+ // Disables functions in the config object based on the provided list of function names
+ function disableFunctions(config, disabledFunctions) {
+ // Bind the hasOwnProperty method to the functionMap object to avoid errors
+ const hasOwnProperty =
+ Object.prototype.hasOwnProperty.bind(functionMap);
+
+ // Loop through the disabledFunctions array and disable the corresponding functions in the config object
+ disabledFunctions.forEach(funcName => {
+ if (hasOwnProperty(funcName)) {
+ const key = functionMap[funcName];
+ config[key] = "disable";
+ }
+ });
+
+ // Return the updated config object
+ return config;
+ }
+
+ /**
+ * The restoreImgRedMask function displays a red mask around an image to indicate the aspect ratio.
+ * If the image display property is set to 'none', the mask breaks. To fix this, the function
+     * temporarily sets the display property to 'block' and then hides the mask again after 400 milliseconds
+ * to avoid breaking the canvas. Additionally, the function adjusts the mask to work correctly on
+ * very long images.
+ */
+ function restoreImgRedMask(elements) {
+ const mainTabId = getTabId(elements);
+
+ if (!mainTabId) return;
+
+ const mainTab = gradioApp().querySelector(mainTabId);
+ const img = mainTab.querySelector("img");
+ const imageARPreview = gradioApp().querySelector("#imageARPreview");
+
+ if (!img || !imageARPreview) return;
+
+ imageARPreview.style.transform = "";
+ if (parseFloat(mainTab.style.width) > 865) {
+ const transformString = mainTab.style.transform;
+ const scaleMatch = transformString.match(
+ /scale\(([-+]?[0-9]*\.?[0-9]+)\)/
+ );
+ let zoom = 1; // default zoom
+
+ if (scaleMatch && scaleMatch[1]) {
+ zoom = Number(scaleMatch[1]);
+ }
+
+ imageARPreview.style.transformOrigin = "0 0";
+ imageARPreview.style.transform = `scale(${zoom})`;
+ }
+
+ if (img.style.display !== "none") return;
+
+ img.style.display = "block";
+
+ setTimeout(() => {
+ img.style.display = "none";
+ }, 400);
+ }
+
+ const hotkeysConfigOpts = await waitForOpts();
+
+ // Default config
+ const defaultHotkeysConfig = {
+ canvas_hotkey_zoom: "Alt",
+ canvas_hotkey_adjust: "Ctrl",
+ canvas_hotkey_reset: "KeyR",
+ canvas_hotkey_fullscreen: "KeyS",
+ canvas_hotkey_move: "KeyF",
+ canvas_hotkey_overlap: "KeyO",
+ canvas_disabled_functions: [],
+ canvas_show_tooltip: true,
+ canvas_blur_prompt: false
+ };
+
+ const functionMap = {
+ "Zoom": "canvas_hotkey_zoom",
+ "Adjust brush size": "canvas_hotkey_adjust",
+ "Moving canvas": "canvas_hotkey_move",
+ "Fullscreen": "canvas_hotkey_fullscreen",
+ "Reset Zoom": "canvas_hotkey_reset",
+ "Overlap": "canvas_hotkey_overlap"
+ };
+
+ // Loading the configuration from opts
+ const preHotkeysConfig = createHotkeyConfig(
+ defaultHotkeysConfig,
+ hotkeysConfigOpts
+ );
+
+ // Disable functions that are not needed by the user
+ const hotkeysConfig = disableFunctions(
+ preHotkeysConfig,
+ preHotkeysConfig.canvas_disabled_functions
+ );
+
+ let isMoving = false;
+ let mouseX, mouseY;
+ let activeElement;
+
+ const elements = Object.fromEntries(
+ Object.keys(elementIDs).map(id => [
+ id,
+ gradioApp().querySelector(elementIDs[id])
+ ])
+ );
+ const elemData = {};
+
+    // Watch the width/height range inputs: when they change, restore the red mask and correct its scale for long images.
+ const rangeInputs = elements.rangeGroup ?
+ Array.from(elements.rangeGroup.querySelectorAll("input")) :
+ [
+ gradioApp().querySelector("#img2img_width input[type='range']"),
+ gradioApp().querySelector("#img2img_height input[type='range']")
+ ];
+
+ for (const input of rangeInputs) {
+ input?.addEventListener("input", () => restoreImgRedMask(elements));
+ }
+
+ function applyZoomAndPan(elemId) {
+ const targetElement = gradioApp().querySelector(elemId);
+
+ if (!targetElement) {
+            console.log(`Element ${elemId} not found`);
+ return;
+ }
+
+ targetElement.style.transformOrigin = "0 0";
+
+        elemData[elemId] = {
+            zoomLevel: 1,
+            panX: 0,
+            panY: 0
+        };
+ let fullScreenMode = false;
+
+ // Create tooltip
+ function createTooltip() {
+            const toolTipElement =
+                targetElement.querySelector(".image-container");
+ const tooltip = document.createElement("div");
+ tooltip.className = "canvas-tooltip";
+
+            // Create the info icon that anchors the tooltip
+ const info = document.createElement("i");
+ info.className = "canvas-tooltip-info";
+ info.textContent = "";
+
+ // Create a container for the contents of the tooltip
+ const tooltipContent = document.createElement("div");
+ tooltipContent.className = "canvas-tooltip-content";
+
+ // Define an array with hotkey information and their actions
+ const hotkeysInfo = [
+ {
+ configKey: "canvas_hotkey_zoom",
+ action: "Zoom canvas",
+ keySuffix: " + wheel"
+ },
+ {
+ configKey: "canvas_hotkey_adjust",
+ action: "Adjust brush size",
+ keySuffix: " + wheel"
+ },
+ {configKey: "canvas_hotkey_reset", action: "Reset zoom"},
+ {
+ configKey: "canvas_hotkey_fullscreen",
+ action: "Fullscreen mode"
+ },
+ {configKey: "canvas_hotkey_move", action: "Move canvas"},
+ {configKey: "canvas_hotkey_overlap", action: "Overlap"}
+ ];
+
+ // Create hotkeys array with disabled property based on the config values
+            const hotkeys = hotkeysInfo.map(hotkeyInfo => {
+                const configValue = hotkeysConfig[hotkeyInfo.configKey];
+                const key = hotkeyInfo.keySuffix ?
+                    `${configValue}${hotkeyInfo.keySuffix}` :
+                    configValue.charAt(configValue.length - 1);
+                return {
+                    key,
+                    action: hotkeyInfo.action,
+                    disabled: configValue === "disable"
+                };
+            });
+
+ for (const hotkey of hotkeys) {
+ if (hotkey.disabled) {
+ continue;
+ }
+
+ const p = document.createElement("p");
+ p.innerHTML = `<b>${hotkey.key}</b> - ${hotkey.action}`;
+ tooltipContent.appendChild(p);
+ }
+
+ // Add information and content elements to the tooltip element
+ tooltip.appendChild(info);
+ tooltip.appendChild(tooltipContent);
+
+            // Attach the assembled tooltip to the target element's image container
+            toolTipElement.appendChild(tooltip);
+ }
+
+        // Show the tooltip if the setting is enabled
+ if (hotkeysConfig.canvas_show_tooltip) {
+ createTooltip();
+ }
+
+        // The img tag interferes with zooming and can produce white canvases; hiding it outside the img2img tab sidesteps the problem without affecting the webui.
+ function fixCanvas() {
+ const activeTab = getActiveTab(elements).textContent.trim();
+
+ if (activeTab !== "img2img") {
+ const img = targetElement.querySelector(`${elemId} img`);
+
+ if (img && img.style.display !== "none") {
+ img.style.display = "none";
+ img.style.visibility = "hidden";
+ }
+ }
+ }
+
+ // Reset the zoom level and pan position of the target element to their initial values
+ function resetZoom() {
+ elemData[elemId] = {
+ zoomLevel: 1,
+ panX: 0,
+ panY: 0
+ };
+
+ fixCanvas();
+ targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`;
+
+ const canvas = gradioApp().querySelector(
+ `${elemId} canvas[key="interface"]`
+ );
+
+ toggleOverlap("off");
+ fullScreenMode = false;
+
+ if (
+ canvas &&
+ parseFloat(canvas.style.width) > 865 &&
+ parseFloat(targetElement.style.width) > 865
+ ) {
+ fitToElement();
+ return;
+ }
+
+ targetElement.style.width = "";
+ if (canvas) {
+ targetElement.style.height = canvas.style.height;
+ }
+ }
+
+ // Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
+ function toggleOverlap(forced = "") {
+ const zIndex1 = "0";
+ const zIndex2 = "998";
+
+ targetElement.style.zIndex =
+ targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1;
+
+ if (forced === "off") {
+ targetElement.style.zIndex = zIndex1;
+ } else if (forced === "on") {
+ targetElement.style.zIndex = zIndex2;
+ }
+ }
+
+ // Adjust the brush size based on the deltaY value from a mouse wheel event
+ function adjustBrushSize(
+ elemId,
+ deltaY,
+ withoutValue = false,
+ percentage = 5
+ ) {
+ const input =
+ gradioApp().querySelector(
+ `${elemId} input[aria-label='Brush radius']`
+ ) ||
+ gradioApp().querySelector(
+ `${elemId} button[aria-label="Use brush"]`
+ );
+
+ if (input) {
+ input.click();
+ if (!withoutValue) {
+ const maxValue =
+ parseFloat(input.getAttribute("max")) || 100;
+ const changeAmount = maxValue * (percentage / 100);
+ const newValue =
+ parseFloat(input.value) +
+ (deltaY > 0 ? -changeAmount : changeAmount);
+ input.value = Math.min(Math.max(newValue, 0), maxValue);
+ input.dispatchEvent(new Event("change"));
+ }
+ }
+ }
+
+ // Reset zoom when uploading a new image
+ const fileInput = gradioApp().querySelector(
+ `${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`
+ );
+        // The class name is Svelte-build specific, so the input may not be found; guard before attaching
+        if (fileInput) {
+            fileInput.addEventListener("click", resetZoom);
+        }
+
+ // Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables
+ function updateZoom(newZoomLevel, mouseX, mouseY) {
+ newZoomLevel = Math.max(0.5, Math.min(newZoomLevel, 15));
+
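+            // Shift the pan so the point under the cursor stays fixed as the scale changes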
+ elemData[elemId].panX +=
+ mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel;
+ elemData[elemId].panY +=
+ mouseY - (mouseY * newZoomLevel) / elemData[elemId].zoomLevel;
+
+ targetElement.style.transformOrigin = "0 0";
+ targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`;
+
+ toggleOverlap("on");
+ return newZoomLevel;
+ }
+
+ // Change the zoom level based on user interaction
+ function changeZoomLevel(operation, e) {
+ if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) {
+ e.preventDefault();
+
+ let zoomPosX, zoomPosY;
+ let delta = 0.2;
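+            // Use coarser zoom steps at higher magnification so zooming stays responsive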
+ if (elemData[elemId].zoomLevel > 7) {
+ delta = 0.9;
+ } else if (elemData[elemId].zoomLevel > 2) {
+ delta = 0.6;
+ }
+
+ zoomPosX = e.clientX;
+ zoomPosY = e.clientY;
+
+ fullScreenMode = false;
+ elemData[elemId].zoomLevel = updateZoom(
+ elemData[elemId].zoomLevel +
+ (operation === "+" ? delta : -delta),
+ zoomPosX - targetElement.getBoundingClientRect().left,
+ zoomPosY - targetElement.getBoundingClientRect().top
+ );
+ }
+ }
+
+    /**
+     * This function fits the target element to its parent container by calculating
+     * the required scale and offsets. It also updates the stored zoomLevel, panX,
+     * and panY values to reflect the new state.
+     */
+
+ function fitToElement() {
+ //Reset Zoom
+ targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
+
+ // Get element and screen dimensions
+ const elementWidth = targetElement.offsetWidth;
+ const elementHeight = targetElement.offsetHeight;
+ const parentElement = targetElement.parentElement;
+ const screenWidth = parentElement.clientWidth;
+ const screenHeight = parentElement.clientHeight;
+
+ // Calculate scale and offsets
+ const scaleX = screenWidth / elementWidth;
+ const scaleY = screenHeight / elementHeight;
+ const scale = Math.min(scaleX, scaleY);
+
+ const transformOrigin =
+ window.getComputedStyle(targetElement).transformOrigin;
+ const [originX, originY] = transformOrigin.split(" ");
+ const originXValue = parseFloat(originX);
+ const originYValue = parseFloat(originY);
+
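+        // Center the element in its parent, compensating for the transform origin's contribution at the new scale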
+ const offsetX =
+ (screenWidth - elementWidth * scale) / 2 -
+ originXValue * (1 - scale);
+ const offsetY =
+ (screenHeight - elementHeight * scale) / 2.5 -
+ originYValue * (1 - scale);
+
+ // Apply scale and offsets to the element
+ targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
+
+ // Update global variables
+ elemData[elemId].zoomLevel = scale;
+ elemData[elemId].panX = offsetX;
+ elemData[elemId].panY = offsetY;
+
+ fullScreenMode = false;
+ toggleOverlap("off");
+ }
+
+    /**
+     * This function fits the target element to the visible screen by calculating
+     * the required scale and offsets. It also updates the stored zoomLevel, panX,
+     * and panY values to reflect the new state.
+     */
+
+ // Fullscreen mode
+ function fitToScreen() {
+ const canvas = gradioApp().querySelector(
+ `${elemId} canvas[key="interface"]`
+ );
+
+ if (!canvas) return;
+
+ if (canvas.offsetWidth > 862) {
+ targetElement.style.width = canvas.offsetWidth + "px";
+ }
+
+ if (fullScreenMode) {
+ resetZoom();
+ fullScreenMode = false;
+ return;
+ }
+
+ //Reset Zoom
+ targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
+
+ // Get scrollbar width to right-align the image
+ const scrollbarWidth =
+ window.innerWidth - document.documentElement.clientWidth;
+
+ // Get element and screen dimensions
+ const elementWidth = targetElement.offsetWidth;
+ const elementHeight = targetElement.offsetHeight;
+ const screenWidth = window.innerWidth - scrollbarWidth;
+ const screenHeight = window.innerHeight;
+
+ // Get element's coordinates relative to the page
+ const elementRect = targetElement.getBoundingClientRect();
+ const elementY = elementRect.y;
+ const elementX = elementRect.x;
+
+ // Calculate scale and offsets
+ const scaleX = screenWidth / elementWidth;
+ const scaleY = screenHeight / elementHeight;
+ const scale = Math.min(scaleX, scaleY);
+
+ // Get the current transformOrigin
+ const computedStyle = window.getComputedStyle(targetElement);
+ const transformOrigin = computedStyle.transformOrigin;
+ const [originX, originY] = transformOrigin.split(" ");
+ const originXValue = parseFloat(originX);
+ const originYValue = parseFloat(originY);
+
+ // Calculate offsets with respect to the transformOrigin
+ const offsetX =
+ (screenWidth - elementWidth * scale) / 2 -
+ elementX -
+ originXValue * (1 - scale);
+ const offsetY =
+ (screenHeight - elementHeight * scale) / 2 -
+ elementY -
+ originYValue * (1 - scale);
+
+ // Apply scale and offsets to the element
+ targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
+
+ // Update global variables
+ elemData[elemId].zoomLevel = scale;
+ elemData[elemId].panX = offsetX;
+ elemData[elemId].panY = offsetY;
+
+ fullScreenMode = true;
+ toggleOverlap("on");
+ }
+
+ // Handle keydown events
+ function handleKeyDown(event) {
+        // Let Ctrl+V / Ctrl+C and F5 pass through so copy, paste, and page refresh keep working
+ if ((event.ctrlKey && event.code === 'KeyV') || (event.ctrlKey && event.code === 'KeyC') || event.code === "F5") {
+ return;
+ }
+
+ // before activating shortcut, ensure user is not actively typing in an input field
+ if (!hotkeysConfig.canvas_blur_prompt) {
+ if (event.target.nodeName === 'TEXTAREA' || event.target.nodeName === 'INPUT') {
+ return;
+ }
+ }
+
+ const hotkeyActions = {
+ [hotkeysConfig.canvas_hotkey_reset]: resetZoom,
+ [hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap,
+ [hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen
+ };
+
+ const action = hotkeyActions[event.code];
+ if (action) {
+ event.preventDefault();
+ action(event);
+ }
+
+ if (
+ isModifierKey(event, hotkeysConfig.canvas_hotkey_zoom) ||
+ isModifierKey(event, hotkeysConfig.canvas_hotkey_adjust)
+ ) {
+ event.preventDefault();
+ }
+ }
+
+ // Get Mouse position
+ function getMousePosition(e) {
+ mouseX = e.offsetX;
+ mouseY = e.offsetY;
+ }
+
+ targetElement.addEventListener("mousemove", getMousePosition);
+
+ // Handle events only inside the targetElement
+ let isKeyDownHandlerAttached = false;
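+        // Attach the keydown handler only while the pointer is over this element so hotkeys stay scoped to the hovered canvas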
+
+ function handleMouseMove() {
+ if (!isKeyDownHandlerAttached) {
+ document.addEventListener("keydown", handleKeyDown);
+ isKeyDownHandlerAttached = true;
+
+ activeElement = elemId;
+ }
+ }
+
+ function handleMouseLeave() {
+ if (isKeyDownHandlerAttached) {
+ document.removeEventListener("keydown", handleKeyDown);
+ isKeyDownHandlerAttached = false;
+
+ activeElement = null;
+ }
+ }
+
+ // Add mouse event handlers
+ targetElement.addEventListener("mousemove", handleMouseMove);
+ targetElement.addEventListener("mouseleave", handleMouseLeave);
+
+ // Reset zoom when click on another tab
+ elements.img2imgTabs.addEventListener("click", resetZoom);
+ elements.img2imgTabs.addEventListener("click", () => {
+ if (parseInt(targetElement.style.width) > 865) {
+ setTimeout(fitToElement, 0);
+ }
+ });
+
+ targetElement.addEventListener("wheel", e => {
+ // change zoom level
+ const operation = e.deltaY > 0 ? "-" : "+";
+ changeZoomLevel(operation, e);
+
+        // Adjust brush size when the configured modifier (Ctrl by default) is held
+ if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) {
+ e.preventDefault();
+
+ // Increase or decrease brush size based on scroll direction
+ adjustBrushSize(elemId, e.deltaY);
+ }
+ });
+
+ // Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
+ function handleMoveKeyDown(e) {
+
+        // Let Ctrl+V / Ctrl+C and F5 pass through so copy, paste, and page refresh keep working
+        if ((e.ctrlKey && e.code === 'KeyV') || (e.ctrlKey && e.code === 'KeyC') || e.code === "F5") {
+ return;
+ }
+
+ // before activating shortcut, ensure user is not actively typing in an input field
+ if (!hotkeysConfig.canvas_blur_prompt) {
+ if (e.target.nodeName === 'TEXTAREA' || e.target.nodeName === 'INPUT') {
+ return;
+ }
+ }
+
+ if (e.code === hotkeysConfig.canvas_hotkey_move) {
+ if (!e.ctrlKey && !e.metaKey && isKeyDownHandlerAttached) {
+ e.preventDefault();
+ document.activeElement.blur();
+ isMoving = true;
+ }
+ }
+ }
+
+ function handleMoveKeyUp(e) {
+ if (e.code === hotkeysConfig.canvas_hotkey_move) {
+ isMoving = false;
+ }
+ }
+
+ document.addEventListener("keydown", handleMoveKeyDown);
+ document.addEventListener("keyup", handleMoveKeyUp);
+
+ // Detect zoom level and update the pan speed.
+ function updatePanPosition(movementX, movementY) {
+ let panSpeed = 2;
+
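+            // Pan faster at high zoom levels so large images remain quick to traverse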
+ if (elemData[elemId].zoomLevel > 8) {
+ panSpeed = 3.5;
+ }
+
+ elemData[elemId].panX += movementX * panSpeed;
+ elemData[elemId].panY += movementY * panSpeed;
+
+ // Delayed redraw of an element
+ requestAnimationFrame(() => {
+ targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${elemData[elemId].zoomLevel})`;
+ toggleOverlap("on");
+ });
+ }
+
+ function handleMoveByKey(e) {
+ if (isMoving && elemId === activeElement) {
+ updatePanPosition(e.movementX, e.movementY);
+ targetElement.style.pointerEvents = "none";
+ } else {
+ targetElement.style.pointerEvents = "auto";
+ }
+ }
+
+        // Prevent the canvas from sticking to the cursor when the window loses focus
+ window.onblur = function() {
+ isMoving = false;
+ };
+
+ gradioApp().addEventListener("mousemove", handleMoveByKey);
+ }
+
+ applyZoomAndPan(elementIDs.sketch);
+ applyZoomAndPan(elementIDs.inpaint);
+ applyZoomAndPan(elementIDs.inpaintSketch);
+
+ // Make the function global so that other extensions can take advantage of this solution
+ window.applyZoomAndPan = applyZoomAndPan;
+});
diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
new file mode 100644
index 00000000..380176ce
--- /dev/null
+++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
@@ -0,0 +1,14 @@
+import gradio as gr
+from modules import shared
+
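+# Expose the canvas hotkeys as user settings; zoom.js reads these values from window.opts at load time.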
+shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), {
+ "canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
+ "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
+ "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
+ "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "),
+ "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"),
+ "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"),
+ "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
+ "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
+ "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
+}))
diff --git a/extensions-builtin/canvas-zoom-and-pan/style.css b/extensions-builtin/canvas-zoom-and-pan/style.css
new file mode 100644
index 00000000..6bcc9570
--- /dev/null
+++ b/extensions-builtin/canvas-zoom-and-pan/style.css
@@ -0,0 +1,63 @@
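+/* Round info badge pinned to the top-left corner of the canvas; hovering it reveals the hotkey tooltip. */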
+.canvas-tooltip-info {
+ position: absolute;
+ top: 10px;
+ left: 10px;
+ cursor: help;
+ background-color: rgba(0, 0, 0, 0.3);
+ width: 20px;
+ height: 20px;
+ border-radius: 50%;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ flex-direction: column;
+
+ z-index: 100;
+}
+
+.canvas-tooltip-info::after {
+ content: '';
+ display: block;
+ width: 2px;
+ height: 7px;
+ background-color: white;
+ margin-top: 2px;
+}
+
+.canvas-tooltip-info::before {
+ content: '';
+ display: block;
+ width: 2px;
+ height: 2px;
+ background-color: white;
+}
+
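+/* Tooltip body listing the available hotkeys; hidden until the info badge is hovered. */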
+.canvas-tooltip-content {
+ display: none;
+ background-color: #f9f9f9;
+ color: #333;
+ border: 1px solid #ddd;
+ padding: 15px;
+ position: absolute;
+ top: 40px;
+ left: 10px;
+ width: 250px;
+ font-size: 16px;
+ opacity: 0;
+ border-radius: 8px;
+ box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
+
+ z-index: 100;
+}
+
+.canvas-tooltip:hover .canvas-tooltip-content {
+ display: block;
+ animation: fadeIn 0.5s;
+ opacity: 1;
+}
+
+@keyframes fadeIn {
+ from {opacity: 0;}
+ to {opacity: 1;}
+}
+
diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
new file mode 100644
index 00000000..a05e10d8
--- /dev/null
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -0,0 +1,48 @@
+import gradio as gr
+from modules import scripts, shared, ui_components, ui_settings
+from modules.ui_components import FormColumn
+
+
+class ExtraOptionsSection(scripts.Script):
+ section = "extra_options"
+
+ def __init__(self):
+ self.comps = None
+ self.setting_names = None
+
+ def title(self):
+ return "Extra options"
+
+ def show(self, is_img2img):
+ return scripts.AlwaysVisible
+
+ def ui(self, is_img2img):
+ self.comps = []
+ self.setting_names = []
+
+ with gr.Blocks() as interface:
+ with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group(), gr.Row():
+ for setting_name in shared.opts.extra_options:
+ with FormColumn():
+ comp = ui_settings.create_setting_component(setting_name)
+
+ self.comps.append(comp)
+ self.setting_names.append(setting_name)
+
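+            # Read the current values of the tracked settings so the controls reflect the live configuration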
+ def get_settings_values():
+ return [ui_settings.get_value_for_setting(key) for key in self.setting_names]
+
+ interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False)
+
+ return self.comps
+
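+    # Apply the UI-selected values through override_settings so they affect this run without changing saved settings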
+ def before_process(self, p, *args):
+ for name, value in zip(self.setting_names, args):
+ if name not in p.override_settings:
+ p.override_settings[name] = value
+
+
+shared.options_templates.update(shared.options_section(('ui', "User interface"), {
+ "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_restart(),
+ "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion")
+}))
diff --git a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
index 5c7a836a..114cf94c 100644
--- a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
+++ b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
@@ -4,39 +4,39 @@
// If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong.
function checkBrackets(textArea, counterElt) {
- var counts = {};
- (textArea.value.match(/[(){}\[\]]/g) || []).forEach(bracket => {
- counts[bracket] = (counts[bracket] || 0) + 1;
- });
- var errors = [];
+ var counts = {};
+ (textArea.value.match(/[(){}[\]]/g) || []).forEach(bracket => {
+ counts[bracket] = (counts[bracket] || 0) + 1;
+ });
+ var errors = [];
- function checkPair(open, close, kind) {
- if (counts[open] !== counts[close]) {
- errors.push(
- `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.`
- );
+ function checkPair(open, close, kind) {
+ if (counts[open] !== counts[close]) {
+ errors.push(
+ `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.`
+ );
+ }
}
- }
- checkPair('(', ')', 'round brackets');
- checkPair('[', ']', 'square brackets');
- checkPair('{', '}', 'curly brackets');
- counterElt.title = errors.join('\n');
- counterElt.classList.toggle('error', errors.length !== 0);
+ checkPair('(', ')', 'round brackets');
+ checkPair('[', ']', 'square brackets');
+ checkPair('{', '}', 'curly brackets');
+ counterElt.title = errors.join('\n');
+ counterElt.classList.toggle('error', errors.length !== 0);
}
function setupBracketChecking(id_prompt, id_counter) {
- var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
- var counter = gradioApp().getElementById(id_counter)
+ var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
+ var counter = gradioApp().getElementById(id_counter);
- if (textarea && counter) {
- textarea.addEventListener("input", () => checkBrackets(textarea, counter));
- }
+ if (textarea && counter) {
+ textarea.addEventListener("input", () => checkBrackets(textarea, counter));
+ }
}
-onUiLoaded(function () {
- setupBracketChecking('txt2img_prompt', 'txt2img_token_counter');
- setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter');
- setupBracketChecking('img2img_prompt', 'img2img_token_counter');
- setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter');
+onUiLoaded(function() {
+ setupBracketChecking('txt2img_prompt', 'txt2img_token_counter');
+ setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter');
+ setupBracketChecking('img2img_prompt', 'img2img_token_counter');
+ setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter');
});