From ad4de819c43997f2666b5bad95301f5c37f9018e Mon Sep 17 00:00:00 2001 From: victorca25 Date: Sun, 9 Oct 2022 13:02:12 +0200 Subject: update ESRGAN architecture and model to support all ESRGAN models in the DB, BSRGAN and real-ESRGAN models --- modules/bsrgan_model.py | 76 ------- modules/bsrgan_model_arch.py | 102 ---------- modules/esrgam_model_arch.py | 80 -------- modules/esrgan_model.py | 190 ++++++++++++------ modules/esrgan_model_arch.py | 463 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 591 insertions(+), 320 deletions(-) delete mode 100644 modules/bsrgan_model.py delete mode 100644 modules/bsrgan_model_arch.py delete mode 100644 modules/esrgam_model_arch.py create mode 100644 modules/esrgan_model_arch.py (limited to 'modules') diff --git a/modules/bsrgan_model.py b/modules/bsrgan_model.py deleted file mode 100644 index 737e1a76..00000000 --- a/modules/bsrgan_model.py +++ /dev/null @@ -1,76 +0,0 @@ -import os.path -import sys -import traceback - -import PIL.Image -import numpy as np -import torch -from basicsr.utils.download_util import load_file_from_url - -import modules.upscaler -from modules import devices, modelloader -from modules.bsrgan_model_arch import RRDBNet - - -class UpscalerBSRGAN(modules.upscaler.Upscaler): - def __init__(self, dirname): - self.name = "BSRGAN" - self.model_name = "BSRGAN 4x" - self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/BSRGAN.pth" - self.user_path = dirname - super().__init__() - model_paths = self.find_models(ext_filter=[".pt", ".pth"]) - scalers = [] - if len(model_paths) == 0: - scaler_data = modules.upscaler.UpscalerData(self.model_name, self.model_url, self, 4) - scalers.append(scaler_data) - for file in model_paths: - if "http" in file: - name = self.model_name - else: - name = modelloader.friendly_name(file) - try: - scaler_data = modules.upscaler.UpscalerData(name, file, self, 4) - scalers.append(scaler_data) - except Exception: - print(f"Error loading BSRGAN model: {file}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - self.scalers = scalers - - def do_upscale(self, img: PIL.Image, selected_file): - torch.cuda.empty_cache() - model = self.load_model(selected_file) - if model is None: - return img - model.to(devices.device_bsrgan) - torch.cuda.empty_cache() - img = np.array(img) - img = img[:, :, ::-1] - img = np.moveaxis(img, 2, 0) / 255 - img = torch.from_numpy(img).float() - img = img.unsqueeze(0).to(devices.device_bsrgan) - with torch.no_grad(): - output = model(img) - output = output.squeeze().float().cpu().clamp_(0, 1).numpy() - output = 255. 
* np.moveaxis(output, 0, 2) - output = output.astype(np.uint8) - output = output[:, :, ::-1] - torch.cuda.empty_cache() - return PIL.Image.fromarray(output, 'RGB') - - def load_model(self, path: str): - if "http" in path: - filename = load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="%s.pth" % self.name, - progress=True) - else: - filename = path - if not os.path.exists(filename) or filename is None: - print(f"BSRGAN: Unable to load model from {filename}", file=sys.stderr) - return None - model = RRDBNet(in_nc=3, out_nc=3, nf=64, nb=23, gc=32, sf=4) # define network - model.load_state_dict(torch.load(filename), strict=True) - model.eval() - for k, v in model.named_parameters(): - v.requires_grad = False - return model - diff --git a/modules/bsrgan_model_arch.py b/modules/bsrgan_model_arch.py deleted file mode 100644 index cb4d1c13..00000000 --- a/modules/bsrgan_model_arch.py +++ /dev/null @@ -1,102 +0,0 @@ -import functools -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.nn.init as init - - -def initialize_weights(net_l, scale=1): - if not isinstance(net_l, list): - net_l = [net_l] - for net in net_l: - for m in net.modules(): - if isinstance(m, nn.Conv2d): - init.kaiming_normal_(m.weight, a=0, mode='fan_in') - m.weight.data *= scale # for residual block - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.Linear): - init.kaiming_normal_(m.weight, a=0, mode='fan_in') - m.weight.data *= scale - if m.bias is not None: - m.bias.data.zero_() - elif isinstance(m, nn.BatchNorm2d): - init.constant_(m.weight, 1) - init.constant_(m.bias.data, 0.0) - - -def make_layer(block, n_layers): - layers = [] - for _ in range(n_layers): - layers.append(block()) - return nn.Sequential(*layers) - - -class ResidualDenseBlock_5C(nn.Module): - def __init__(self, nf=64, gc=32, bias=True): - super(ResidualDenseBlock_5C, self).__init__() - # gc: growth channel, i.e. 
intermediate channels - self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias) - self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias) - self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias) - self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias) - self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias) - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - # initialization - initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1) - - def forward(self, x): - x1 = self.lrelu(self.conv1(x)) - x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1))) - x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1))) - x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1))) - x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) - return x5 * 0.2 + x - - -class RRDB(nn.Module): - '''Residual in Residual Dense Block''' - - def __init__(self, nf, gc=32): - super(RRDB, self).__init__() - self.RDB1 = ResidualDenseBlock_5C(nf, gc) - self.RDB2 = ResidualDenseBlock_5C(nf, gc) - self.RDB3 = ResidualDenseBlock_5C(nf, gc) - - def forward(self, x): - out = self.RDB1(x) - out = self.RDB2(out) - out = self.RDB3(out) - return out * 0.2 + x - - -class RRDBNet(nn.Module): - def __init__(self, in_nc=3, out_nc=3, nf=64, nb=23, gc=32, sf=4): - super(RRDBNet, self).__init__() - RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc) - self.sf = sf - - self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True) - self.RRDB_trunk = make_layer(RRDB_block_f, nb) - self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - #### upsampling - self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - if self.sf==4: - self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True) - - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - def forward(self, x): - fea = self.conv_first(x) - trunk = self.trunk_conv(self.RRDB_trunk(fea)) - fea = fea + trunk - - fea = self.lrelu(self.upconv1(F.interpolate(fea, scale_factor=2, mode='nearest'))) - if self.sf==4: - fea = self.lrelu(self.upconv2(F.interpolate(fea, scale_factor=2, mode='nearest'))) - out = self.conv_last(self.lrelu(self.HRconv(fea))) - - return out \ No newline at end of file diff --git a/modules/esrgam_model_arch.py b/modules/esrgam_model_arch.py deleted file mode 100644 index e413d36e..00000000 --- a/modules/esrgam_model_arch.py +++ /dev/null @@ -1,80 +0,0 @@ -# this file is taken from https://github.com/xinntao/ESRGAN - -import functools -import torch -import torch.nn as nn -import torch.nn.functional as F - - -def make_layer(block, n_layers): - layers = [] - for _ in range(n_layers): - layers.append(block()) - return nn.Sequential(*layers) - - -class ResidualDenseBlock_5C(nn.Module): - def __init__(self, nf=64, gc=32, bias=True): - super(ResidualDenseBlock_5C, self).__init__() - # gc: growth channel, i.e. 
intermediate channels - self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias) - self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias) - self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias) - self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias) - self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias) - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - # initialization - # mutil.initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1) - - def forward(self, x): - x1 = self.lrelu(self.conv1(x)) - x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1))) - x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1))) - x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1))) - x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) - return x5 * 0.2 + x - - -class RRDB(nn.Module): - '''Residual in Residual Dense Block''' - - def __init__(self, nf, gc=32): - super(RRDB, self).__init__() - self.RDB1 = ResidualDenseBlock_5C(nf, gc) - self.RDB2 = ResidualDenseBlock_5C(nf, gc) - self.RDB3 = ResidualDenseBlock_5C(nf, gc) - - def forward(self, x): - out = self.RDB1(x) - out = self.RDB2(out) - out = self.RDB3(out) - return out * 0.2 + x - - -class RRDBNet(nn.Module): - def __init__(self, in_nc, out_nc, nf, nb, gc=32): - super(RRDBNet, self).__init__() - RRDB_block_f = functools.partial(RRDB, nf=nf, gc=gc) - - self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True) - self.RRDB_trunk = make_layer(RRDB_block_f, nb) - self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - #### upsampling - self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) - self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True) - - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - def forward(self, x): - fea = self.conv_first(x) - trunk = self.trunk_conv(self.RRDB_trunk(fea)) - fea = fea + trunk - - fea = self.lrelu(self.upconv1(F.interpolate(fea, scale_factor=2, mode='nearest'))) - fea = self.lrelu(self.upconv2(F.interpolate(fea, scale_factor=2, mode='nearest'))) - out = self.conv_last(self.lrelu(self.HRconv(fea))) - - return out diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index 3970e6e4..a49e2258 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -5,68 +5,115 @@ import torch from PIL import Image from basicsr.utils.download_util import load_file_from_url -import modules.esrgam_model_arch as arch +import modules.esrgan_model_arch as arch from modules import shared, modelloader, images, devices from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts -def fix_model_layers(crt_model, pretrained_net): - # this code is adapted from https://github.com/xinntao/ESRGAN - if 'conv_first.weight' in pretrained_net: - return pretrained_net - if 'model.0.weight' not in pretrained_net: - is_realesrgan = "params_ema" in pretrained_net and 'body.0.rdb1.conv1.weight' in pretrained_net["params_ema"] - if is_realesrgan: - raise Exception("The file is a RealESRGAN model, it can't be used as a ESRGAN model.") - else: - raise Exception("The file is not a ESRGAN model.") +def mod2normal(state_dict): + # this code is copied from https://github.com/victorca25/iNNfer + if 'conv_first.weight' in state_dict: + crt_net = {} + items = [] + for k, v in state_dict.items(): + items.append(k) + + crt_net['model.0.weight'] = state_dict['conv_first.weight'] + crt_net['model.0.bias'] = state_dict['conv_first.bias'] + + for k in 
items.copy(): + if 'RDB' in k: + ori_k = k.replace('RRDB_trunk.', 'model.1.sub.') + if '.weight' in k: + ori_k = ori_k.replace('.weight', '.0.weight') + elif '.bias' in k: + ori_k = ori_k.replace('.bias', '.0.bias') + crt_net[ori_k] = state_dict[k] + items.remove(k) + + crt_net['model.1.sub.23.weight'] = state_dict['trunk_conv.weight'] + crt_net['model.1.sub.23.bias'] = state_dict['trunk_conv.bias'] + crt_net['model.3.weight'] = state_dict['upconv1.weight'] + crt_net['model.3.bias'] = state_dict['upconv1.bias'] + crt_net['model.6.weight'] = state_dict['upconv2.weight'] + crt_net['model.6.bias'] = state_dict['upconv2.bias'] + crt_net['model.8.weight'] = state_dict['HRconv.weight'] + crt_net['model.8.bias'] = state_dict['HRconv.bias'] + crt_net['model.10.weight'] = state_dict['conv_last.weight'] + crt_net['model.10.bias'] = state_dict['conv_last.bias'] + state_dict = crt_net + return state_dict + + +def resrgan2normal(state_dict, nb=23): + # this code is copied from https://github.com/victorca25/iNNfer + if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict: + crt_net = {} + items = [] + for k, v in state_dict.items(): + items.append(k) + + crt_net['model.0.weight'] = state_dict['conv_first.weight'] + crt_net['model.0.bias'] = state_dict['conv_first.bias'] + + for k in items.copy(): + if "rdb" in k: + ori_k = k.replace('body.', 'model.1.sub.') + ori_k = ori_k.replace('.rdb', '.RDB') + if '.weight' in k: + ori_k = ori_k.replace('.weight', '.0.weight') + elif '.bias' in k: + ori_k = ori_k.replace('.bias', '.0.bias') + crt_net[ori_k] = state_dict[k] + items.remove(k) + + crt_net[f'model.1.sub.{nb}.weight'] = state_dict['conv_body.weight'] + crt_net[f'model.1.sub.{nb}.bias'] = state_dict['conv_body.bias'] + crt_net['model.3.weight'] = state_dict['conv_up1.weight'] + crt_net['model.3.bias'] = state_dict['conv_up1.bias'] + crt_net['model.6.weight'] = state_dict['conv_up2.weight'] + crt_net['model.6.bias'] = state_dict['conv_up2.bias'] + crt_net['model.8.weight'] = state_dict['conv_hr.weight'] + crt_net['model.8.bias'] = state_dict['conv_hr.bias'] + crt_net['model.10.weight'] = state_dict['conv_last.weight'] + crt_net['model.10.bias'] = state_dict['conv_last.bias'] + state_dict = crt_net + return state_dict + + +def infer_params(state_dict): + # this code is copied from https://github.com/victorca25/iNNfer + scale2x = 0 + scalemin = 6 + n_uplayer = 0 + plus = False + + for block in list(state_dict): + parts = block.split(".") + n_parts = len(parts) + if n_parts == 5 and parts[2] == "sub": + nb = int(parts[3]) + elif n_parts == 3: + part_num = int(parts[1]) + if (part_num > scalemin + and parts[0] == "model" + and parts[2] == "weight"): + scale2x += 1 + if part_num > n_uplayer: + n_uplayer = part_num + out_nc = state_dict[block].shape[0] + if not plus and "conv1x1" in block: + plus = True + + nf = state_dict["model.0.weight"].shape[0] + in_nc = state_dict["model.0.weight"].shape[1] + out_nc = out_nc + scale = 2 ** scale2x + + return in_nc, out_nc, nf, nb, plus, scale - crt_net = crt_model.state_dict() - load_net_clean = {} - for k, v in pretrained_net.items(): - if k.startswith('module.'): - load_net_clean[k[7:]] = v - else: - load_net_clean[k] = v - pretrained_net = load_net_clean - - tbd = [] - for k, v in crt_net.items(): - tbd.append(k) - - # directly copy - for k, v in crt_net.items(): - if k in pretrained_net and pretrained_net[k].size() == v.size(): - crt_net[k] = pretrained_net[k] - tbd.remove(k) - - crt_net['conv_first.weight'] = pretrained_net['model.0.weight'] 
- crt_net['conv_first.bias'] = pretrained_net['model.0.bias'] - - for k in tbd.copy(): - if 'RDB' in k: - ori_k = k.replace('RRDB_trunk.', 'model.1.sub.') - if '.weight' in k: - ori_k = ori_k.replace('.weight', '.0.weight') - elif '.bias' in k: - ori_k = ori_k.replace('.bias', '.0.bias') - crt_net[k] = pretrained_net[ori_k] - tbd.remove(k) - - crt_net['trunk_conv.weight'] = pretrained_net['model.1.sub.23.weight'] - crt_net['trunk_conv.bias'] = pretrained_net['model.1.sub.23.bias'] - crt_net['upconv1.weight'] = pretrained_net['model.3.weight'] - crt_net['upconv1.bias'] = pretrained_net['model.3.bias'] - crt_net['upconv2.weight'] = pretrained_net['model.6.weight'] - crt_net['upconv2.bias'] = pretrained_net['model.6.bias'] - crt_net['HRconv.weight'] = pretrained_net['model.8.weight'] - crt_net['HRconv.bias'] = pretrained_net['model.8.bias'] - crt_net['conv_last.weight'] = pretrained_net['model.10.weight'] - crt_net['conv_last.bias'] = pretrained_net['model.10.bias'] - - return crt_net class UpscalerESRGAN(Upscaler): def __init__(self, dirname): @@ -109,20 +156,39 @@ class UpscalerESRGAN(Upscaler): print("Unable to load %s from %s" % (self.model_path, filename)) return None - pretrained_net = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None) - crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32) + state_dict = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None) + + if "params_ema" in state_dict: + state_dict = state_dict["params_ema"] + elif "params" in state_dict: + state_dict = state_dict["params"] + num_conv = 16 if "realesr-animevideov3" in filename else 32 + model = arch.SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=num_conv, upscale=4, act_type='prelu') + model.load_state_dict(state_dict) + model.eval() + return model + + if "body.0.rdb1.conv1.weight" in state_dict and "conv_first.weight" in state_dict: + nb = 6 if "RealESRGAN_x4plus_anime_6B" in filename else 23 + state_dict = resrgan2normal(state_dict, nb) + elif "conv_first.weight" in state_dict: + state_dict = mod2normal(state_dict) + elif "model.0.weight" not in state_dict: + raise Exception("The file is not a recognized ESRGAN model.") + + in_nc, out_nc, nf, nb, plus, mscale = infer_params(state_dict) - pretrained_net = fix_model_layers(crt_model, pretrained_net) - crt_model.load_state_dict(pretrained_net) - crt_model.eval() + model = arch.RRDBNet(in_nc=in_nc, out_nc=out_nc, nf=nf, nb=nb, upscale=mscale, plus=plus) + model.load_state_dict(state_dict) + model.eval() - return crt_model + return model def upscale_without_tiling(model, img): img = np.array(img) img = img[:, :, ::-1] - img = np.moveaxis(img, 2, 0) / 255 + img = np.ascontiguousarray(np.transpose(img, (2, 0, 1))) / 255 img = torch.from_numpy(img).float() img = img.unsqueeze(0).to(devices.device_esrgan) with torch.no_grad(): diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py new file mode 100644 index 00000000..bc9ceb2a --- /dev/null +++ b/modules/esrgan_model_arch.py @@ -0,0 +1,463 @@ +# this file is adapted from https://github.com/victorca25/iNNfer + +import math +import functools +import torch +import torch.nn as nn +import torch.nn.functional as F + + +#################### +# RRDBNet Generator +#################### + +class RRDBNet(nn.Module): + def __init__(self, in_nc, out_nc, nf, nb, nr=3, gc=32, upscale=4, norm_type=None, + act_type='leakyrelu', mode='CNA', upsample_mode='upconv', convtype='Conv2D', + finalact=None, gaussian_noise=False, plus=False): + 
super(RRDBNet, self).__init__() + n_upscale = int(math.log(upscale, 2)) + if upscale == 3: + n_upscale = 1 + + self.resrgan_scale = 0 + if in_nc % 16 == 0: + self.resrgan_scale = 1 + elif in_nc != 4 and in_nc % 4 == 0: + self.resrgan_scale = 2 + + fea_conv = conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None, convtype=convtype) + rb_blocks = [RRDB(nf, nr, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero', + norm_type=norm_type, act_type=act_type, mode='CNA', convtype=convtype, + gaussian_noise=gaussian_noise, plus=plus) for _ in range(nb)] + LR_conv = conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode, convtype=convtype) + + if upsample_mode == 'upconv': + upsample_block = upconv_block + elif upsample_mode == 'pixelshuffle': + upsample_block = pixelshuffle_block + else: + raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode)) + if upscale == 3: + upsampler = upsample_block(nf, nf, 3, act_type=act_type, convtype=convtype) + else: + upsampler = [upsample_block(nf, nf, act_type=act_type, convtype=convtype) for _ in range(n_upscale)] + HR_conv0 = conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type, convtype=convtype) + HR_conv1 = conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None, convtype=convtype) + + outact = act(finalact) if finalact else None + + self.model = sequential(fea_conv, ShortcutBlock(sequential(*rb_blocks, LR_conv)), + *upsampler, HR_conv0, HR_conv1, outact) + + def forward(self, x, outm=None): + if self.resrgan_scale == 1: + feat = pixel_unshuffle(x, scale=4) + elif self.resrgan_scale == 2: + feat = pixel_unshuffle(x, scale=2) + else: + feat = x + + return self.model(feat) + + +class RRDB(nn.Module): + """ + Residual in Residual Dense Block + (ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks) + """ + + def __init__(self, nf, nr=3, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero', + norm_type=None, act_type='leakyrelu', mode='CNA', convtype='Conv2D', + spectral_norm=False, gaussian_noise=False, plus=False): + super(RRDB, self).__init__() + # This is for backwards compatibility with existing models + if nr == 3: + self.RDB1 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type, + norm_type, act_type, mode, convtype, spectral_norm=spectral_norm, + gaussian_noise=gaussian_noise, plus=plus) + self.RDB2 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type, + norm_type, act_type, mode, convtype, spectral_norm=spectral_norm, + gaussian_noise=gaussian_noise, plus=plus) + self.RDB3 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type, + norm_type, act_type, mode, convtype, spectral_norm=spectral_norm, + gaussian_noise=gaussian_noise, plus=plus) + else: + RDB_list = [ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type, + norm_type, act_type, mode, convtype, spectral_norm=spectral_norm, + gaussian_noise=gaussian_noise, plus=plus) for _ in range(nr)] + self.RDBs = nn.Sequential(*RDB_list) + + def forward(self, x): + if hasattr(self, 'RDB1'): + out = self.RDB1(x) + out = self.RDB2(out) + out = self.RDB3(out) + else: + out = self.RDBs(x) + return out * 0.2 + x + + +class ResidualDenseBlock_5C(nn.Module): + """ + Residual Dense Block + The core module of paper: (Residual Dense Network for Image Super-Resolution, CVPR 18) + Modified options that can be used: + - "Partial Convolution based Padding" arXiv:1811.11718 + - "Spectral normalization" arXiv:1802.05957 + - "ICASSP 2020 - ESRGAN+ : Further 
Improving ESRGAN" N. C. + {Rakotonirina} and A. {Rasoanaivo} + """ + + def __init__(self, nf=64, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero', + norm_type=None, act_type='leakyrelu', mode='CNA', convtype='Conv2D', + spectral_norm=False, gaussian_noise=False, plus=False): + super(ResidualDenseBlock_5C, self).__init__() + + self.noise = GaussianNoise() if gaussian_noise else None + self.conv1x1 = conv1x1(nf, gc) if plus else None + + self.conv1 = conv_block(nf, gc, kernel_size, stride, bias=bias, pad_type=pad_type, + norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype, + spectral_norm=spectral_norm) + self.conv2 = conv_block(nf+gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, + norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype, + spectral_norm=spectral_norm) + self.conv3 = conv_block(nf+2*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, + norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype, + spectral_norm=spectral_norm) + self.conv4 = conv_block(nf+3*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, + norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype, + spectral_norm=spectral_norm) + if mode == 'CNA': + last_act = None + else: + last_act = act_type + self.conv5 = conv_block(nf+4*gc, nf, 3, stride, bias=bias, pad_type=pad_type, + norm_type=norm_type, act_type=last_act, mode=mode, convtype=convtype, + spectral_norm=spectral_norm) + + def forward(self, x): + x1 = self.conv1(x) + x2 = self.conv2(torch.cat((x, x1), 1)) + if self.conv1x1: + x2 = x2 + self.conv1x1(x) + x3 = self.conv3(torch.cat((x, x1, x2), 1)) + x4 = self.conv4(torch.cat((x, x1, x2, x3), 1)) + if self.conv1x1: + x4 = x4 + x2 + x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) + if self.noise: + return self.noise(x5.mul(0.2) + x) + else: + return x5 * 0.2 + x + + +#################### +# ESRGANplus +#################### + +class GaussianNoise(nn.Module): + def __init__(self, sigma=0.1, is_relative_detach=False): + super().__init__() + self.sigma = sigma + self.is_relative_detach = is_relative_detach + self.noise = torch.tensor(0, dtype=torch.float) + + def forward(self, x): + if self.training and self.sigma != 0: + self.noise = self.noise.to(x.device) + scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x + sampled_noise = self.noise.repeat(*x.size()).normal_() * scale + x = x + sampled_noise + return x + +def conv1x1(in_planes, out_planes, stride=1): + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +#################### +# SRVGGNetCompact +#################### + +class SRVGGNetCompact(nn.Module): + """A compact VGG-style network structure for super-resolution. 
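+    It is a plain stack of 3x3 convolutions and activations (no residual dense
+    blocks) with a single PixelShuffle upsample at the end; a nearest-upsampled
+    copy of the input is added back so the network only learns the residual.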
+ This class is copied from https://github.com/xinntao/Real-ESRGAN + """ + + def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'): + super(SRVGGNetCompact, self).__init__() + self.num_in_ch = num_in_ch + self.num_out_ch = num_out_ch + self.num_feat = num_feat + self.num_conv = num_conv + self.upscale = upscale + self.act_type = act_type + + self.body = nn.ModuleList() + # the first conv + self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)) + # the first activation + if act_type == 'relu': + activation = nn.ReLU(inplace=True) + elif act_type == 'prelu': + activation = nn.PReLU(num_parameters=num_feat) + elif act_type == 'leakyrelu': + activation = nn.LeakyReLU(negative_slope=0.1, inplace=True) + self.body.append(activation) + + # the body structure + for _ in range(num_conv): + self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1)) + # activation + if act_type == 'relu': + activation = nn.ReLU(inplace=True) + elif act_type == 'prelu': + activation = nn.PReLU(num_parameters=num_feat) + elif act_type == 'leakyrelu': + activation = nn.LeakyReLU(negative_slope=0.1, inplace=True) + self.body.append(activation) + + # the last conv + self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1)) + # upsample + self.upsampler = nn.PixelShuffle(upscale) + + def forward(self, x): + out = x + for i in range(0, len(self.body)): + out = self.body[i](out) + + out = self.upsampler(out) + # add the nearest upsampled image, so that the network learns the residual + base = F.interpolate(x, scale_factor=self.upscale, mode='nearest') + out += base + return out + + +#################### +# Upsampler +#################### + +class Upsample(nn.Module): + r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data. + The input data is assumed to be of the form + `minibatch x channels x [optional depth] x [optional height] x width`. + """ + + def __init__(self, size=None, scale_factor=None, mode="nearest", align_corners=None): + super(Upsample, self).__init__() + if isinstance(scale_factor, tuple): + self.scale_factor = tuple(float(factor) for factor in scale_factor) + else: + self.scale_factor = float(scale_factor) if scale_factor else None + self.mode = mode + self.size = size + self.align_corners = align_corners + + def forward(self, x): + return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners) + + def extra_repr(self): + if self.scale_factor is not None: + info = 'scale_factor=' + str(self.scale_factor) + else: + info = 'size=' + str(self.size) + info += ', mode=' + self.mode + return info + + +def pixel_unshuffle(x, scale): + """ Pixel unshuffle. + Args: + x (Tensor): Input feature with shape (b, c, hh, hw). + scale (int): Downsample ratio. + Returns: + Tensor: the pixel unshuffled feature. 
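+        Example: with scale=2, a (1, 3, 8, 8) input becomes a (1, 12, 4, 4)
+        tensor; spatial detail is folded into channels, which is how the x2/x1
+        Real-ESRGAN checkpoints (12 or 48 input channels) are served by the
+        same x4 upsampling path in RRDBNet.forward.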
+ """ + b, c, hh, hw = x.size() + out_channel = c * (scale**2) + assert hh % scale == 0 and hw % scale == 0 + h = hh // scale + w = hw // scale + x_view = x.view(b, c, h, scale, w, scale) + return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w) + + +def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, + pad_type='zero', norm_type=None, act_type='relu', convtype='Conv2D'): + """ + Pixel shuffle layer + (Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional + Neural Network, CVPR17) + """ + conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias, + pad_type=pad_type, norm_type=None, act_type=None, convtype=convtype) + pixel_shuffle = nn.PixelShuffle(upscale_factor) + + n = norm(norm_type, out_nc) if norm_type else None + a = act(act_type) if act_type else None + return sequential(conv, pixel_shuffle, n, a) + + +def upconv_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, + pad_type='zero', norm_type=None, act_type='relu', mode='nearest', convtype='Conv2D'): + """ Upconv layer """ + upscale_factor = (1, upscale_factor, upscale_factor) if convtype == 'Conv3D' else upscale_factor + upsample = Upsample(scale_factor=upscale_factor, mode=mode) + conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias, + pad_type=pad_type, norm_type=norm_type, act_type=act_type, convtype=convtype) + return sequential(upsample, conv) + + + + + + + + +#################### +# Basic blocks +#################### + + +def make_layer(basic_block, num_basic_block, **kwarg): + """Make layers by stacking the same blocks. + Args: + basic_block (nn.module): nn.module class for basic block. (block) + num_basic_block (int): number of blocks. (n_layers) + Returns: + nn.Sequential: Stacked blocks in nn.Sequential. 
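+        Example: make_layer(functools.partial(RRDB, nf=64, gc=32), 23) would
+        build the standard 23-block ESRGAN trunk as a single nn.Sequential.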
+ """ + layers = [] + for _ in range(num_basic_block): + layers.append(basic_block(**kwarg)) + return nn.Sequential(*layers) + + +def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1, beta=1.0): + """ activation helper """ + act_type = act_type.lower() + if act_type == 'relu': + layer = nn.ReLU(inplace) + elif act_type in ('leakyrelu', 'lrelu'): + layer = nn.LeakyReLU(neg_slope, inplace) + elif act_type == 'prelu': + layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope) + elif act_type == 'tanh': # [-1, 1] range output + layer = nn.Tanh() + elif act_type == 'sigmoid': # [0, 1] range output + layer = nn.Sigmoid() + else: + raise NotImplementedError('activation layer [{:s}] is not found'.format(act_type)) + return layer + + +class Identity(nn.Module): + def __init__(self, *kwargs): + super(Identity, self).__init__() + + def forward(self, x, *kwargs): + return x + + +def norm(norm_type, nc): + """ Return a normalization layer """ + norm_type = norm_type.lower() + if norm_type == 'batch': + layer = nn.BatchNorm2d(nc, affine=True) + elif norm_type == 'instance': + layer = nn.InstanceNorm2d(nc, affine=False) + elif norm_type == 'none': + def norm_layer(x): return Identity() + else: + raise NotImplementedError('normalization layer [{:s}] is not found'.format(norm_type)) + return layer + + +def pad(pad_type, padding): + """ padding layer helper """ + pad_type = pad_type.lower() + if padding == 0: + return None + if pad_type == 'reflect': + layer = nn.ReflectionPad2d(padding) + elif pad_type == 'replicate': + layer = nn.ReplicationPad2d(padding) + elif pad_type == 'zero': + layer = nn.ZeroPad2d(padding) + else: + raise NotImplementedError('padding layer [{:s}] is not implemented'.format(pad_type)) + return layer + + +def get_valid_padding(kernel_size, dilation): + kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1) + padding = (kernel_size - 1) // 2 + return padding + + +class ShortcutBlock(nn.Module): + """ Elementwise sum the output of a submodule to its input """ + def __init__(self, submodule): + super(ShortcutBlock, self).__init__() + self.sub = submodule + + def forward(self, x): + output = x + self.sub(x) + return output + + def __repr__(self): + return 'Identity + \n|' + self.sub.__repr__().replace('\n', '\n|') + + +def sequential(*args): + """ Flatten Sequential. It unwraps nn.Sequential. """ + if len(args) == 1: + if isinstance(args[0], OrderedDict): + raise NotImplementedError('sequential does not support OrderedDict input.') + return args[0] # No sequential is needed. 
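+    # Flatten everything else: children of nested nn.Sequential modules are
+    # unwrapped, so the module indices of the assembled model line up with the
+    # flat 'model.N' keys (model.0, model.1.sub.X, model.3, ...) that old-style
+    # ESRGAN state dicts use.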
+ modules = [] + for module in args: + if isinstance(module, nn.Sequential): + for submodule in module.children(): + modules.append(submodule) + elif isinstance(module, nn.Module): + modules.append(module) + return nn.Sequential(*modules) + + +def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True, + pad_type='zero', norm_type=None, act_type='relu', mode='CNA', convtype='Conv2D', + spectral_norm=False): + """ Conv layer with padding, normalization, activation """ + assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [{:s}]'.format(mode) + padding = get_valid_padding(kernel_size, dilation) + p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None + padding = padding if pad_type == 'zero' else 0 + + if convtype=='PartialConv2D': + c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, + dilation=dilation, bias=bias, groups=groups) + elif convtype=='DeformConv2D': + c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, + dilation=dilation, bias=bias, groups=groups) + elif convtype=='Conv3D': + c = nn.Conv3d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, + dilation=dilation, bias=bias, groups=groups) + else: + c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, + dilation=dilation, bias=bias, groups=groups) + + if spectral_norm: + c = nn.utils.spectral_norm(c) + + a = act(act_type) if act_type else None + if 'CNA' in mode: + n = norm(norm_type, out_nc) if norm_type else None + return sequential(p, c, n, a) + elif mode == 'NAC': + if norm_type is None and act_type is not None: + a = act(act_type, inplace=False) + n = norm(norm_type, in_nc) if norm_type else None + return sequential(n, a, p, c) -- cgit v1.2.1 From 763b893f319cee280b86e63025eb55e7c16b02e7 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Sun, 16 Oct 2022 10:03:09 +0800 Subject: images history sorting files by date --- modules/images_history.py | 261 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 196 insertions(+), 65 deletions(-) (limited to 'modules') diff --git a/modules/images_history.py b/modules/images_history.py index f5ef44fe..533cf51b 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -1,33 +1,74 @@ import os import shutil +import time +import hashlib +import gradio +show_max_dates_num = 3 +system_bak_path = "webui_log_and_bak" +def is_valid_date(date): + try: + time.strptime(date, "%Y%m%d") + return True + except: + return False +def reduplicative_file_move(src, dst): + def same_name_file(basename, path): + name, ext = os.path.splitext(basename) + f_list = os.listdir(path) + max_num = 0 + for f in f_list: + if len(f) <= len(basename): + continue + f_ext = f[-len(ext):] if len(ext) > 0 else "" + if f[:len(name)] == name and f_ext == ext: + if f[len(name)] == "(" and f[-len(ext)-1] == ")": + number = f[len(name)+1:-len(ext)-1] + if number.isdigit(): + if int(number) > max_num: + max_num = int(number) + return f"{name}({max_num + 1}){ext}" + name = os.path.basename(src) + save_name = os.path.join(dst, name) + if not os.path.exists(save_name): + shutil.move(src, dst) + else: + name = same_name_file(name, dst) + shutil.move(src, os.path.join(dst, name)) -def traverse_all_files(output_dir, image_list, curr_dir=None): - curr_path = output_dir if curr_dir is None else os.path.join(output_dir, curr_dir) +def traverse_all_files(curr_path, image_list, all_type=False): try: f_list = os.listdir(curr_path) except: - if 
curr_dir[-10:].rfind(".") > 0 and curr_dir[-4:] != ".txt": - image_list.append(curr_dir) + if all_type or curr_path[-10:].rfind(".") > 0 and curr_path[-4:] != ".txt": + image_list.append(curr_path) return image_list for file in f_list: - file = file if curr_dir is None else os.path.join(curr_dir, file) - file_path = os.path.join(curr_path, file) - if file[-4:] == ".txt": + file = os.path.join(curr_path, file) + if (not all_type) and file[-4:] == ".txt": pass - elif os.path.isfile(file_path) and file[-10:].rfind(".") > 0: + elif os.path.isfile(file) and file[-10:].rfind(".") > 0: image_list.append(file) else: - image_list = traverse_all_files(output_dir, image_list, file) + image_list = traverse_all_files(file, image_list) return image_list - -def get_recent_images(dir_name, page_index, step, image_index, tabname): - page_index = int(page_index) - f_list = os.listdir(dir_name) +def get_recent_images(dir_name, page_index, step, image_index, tabname, date_from, date_to): + #print(f"turn_page {page_index}",date_from) + if date_from is None or date_from == "": + return None, 1, None, "" image_list = [] - image_list = traverse_all_files(dir_name, image_list) - image_list = sorted(image_list, key=lambda file: -os.path.getctime(os.path.join(dir_name, file))) + date_list = auto_sorting(dir_name) + page_index = int(page_index) + today = time.strftime("%Y%m%d",time.localtime(time.time())) + for date in date_list: + if date >= date_from and date <= date_to: + path = os.path.join(dir_name, date) + if date == today and not os.path.exists(path): + continue + image_list = traverse_all_files(path, image_list) + + image_list = sorted(image_list, key=lambda file: -os.path.getctime(file)) num = 48 if tabname != "extras" else 12 max_page_index = len(image_list) // num + 1 page_index = max_page_index if page_index == -1 else page_index + step @@ -38,40 +79,101 @@ def get_recent_images(dir_name, page_index, step, image_index, tabname): image_index = int(image_index) if image_index < 0 or image_index > len(image_list) - 1: current_file = None - hidden = None else: - current_file = image_list[int(image_index)] - hidden = os.path.join(dir_name, current_file) - return [os.path.join(dir_name, file) for file in image_list], page_index, image_list, current_file, hidden, "" + current_file = image_list[image_index] + return image_list, page_index, image_list, "" +def auto_sorting(dir_name): + #print(f"auto sorting") + bak_path = os.path.join(dir_name, system_bak_path) + if not os.path.exists(bak_path): + os.mkdir(bak_path) + log_file = None + files_list = [] + f_list = os.listdir(dir_name) + for file in f_list: + if file == system_bak_path: + continue + file_path = os.path.join(dir_name, file) + if not is_valid_date(file): + if file[-10:].rfind(".") > 0: + files_list.append(file_path) + else: + files_list = traverse_all_files(file_path, files_list, all_type=True) + + for file in files_list: + date_str = time.strftime("%Y%m%d",time.localtime(os.path.getctime(file))) + file_path = os.path.dirname(file) + hash_path = hashlib.md5(file_path.encode()).hexdigest() + path = os.path.join(dir_name, date_str, hash_path) + if not os.path.exists(path): + os.makedirs(path) + if log_file is None: + log_file = open(os.path.join(bak_path,"path_mapping.csv"),"a") + log_file.write(f"{hash_path},{file_path}\n") + reduplicative_file_move(file, path) + + date_list = [] + f_list = os.listdir(dir_name) + for f in f_list: + if is_valid_date(f): + date_list.append(f) + elif f == system_bak_path: + continue + else: + 
reduplicative_file_move(os.path.join(dir_name, f), bak_path) + + today = time.strftime("%Y%m%d",time.localtime(time.time())) + if today not in date_list: + date_list.append(today) + return sorted(date_list) -def first_page_click(dir_name, page_index, image_index, tabname): - return get_recent_images(dir_name, 1, 0, image_index, tabname) -def end_page_click(dir_name, page_index, image_index, tabname): - return get_recent_images(dir_name, -1, 0, image_index, tabname) +def archive_images(dir_name): + date_list = auto_sorting(dir_name) + date_from = date_list[-show_max_dates_num] if len(date_list) > show_max_dates_num else date_list[0] + return ( + gradio.update(visible=False), + gradio.update(visible=True), + gradio.Dropdown.update(choices=date_list, value=date_list[-1]), + gradio.Dropdown.update(choices=date_list, value=date_from) + ) +def date_to_change(dir_name, page_index, image_index, tabname, date_from, date_to): + #print("date_to", date_to) + date_list = auto_sorting(dir_name) + date_from_list = [date for date in date_list if date <= date_to] + date_from = date_from_list[0] if len(date_from_list) < show_max_dates_num else date_from_list[-show_max_dates_num] + image_list, page_index, image_list, _ =get_recent_images(dir_name, 1, 0, image_index, tabname, date_from, date_to) + return image_list, page_index, image_list, _, gradio.Dropdown.update(choices=date_from_list, value=date_from) -def prev_page_click(dir_name, page_index, image_index, tabname): - return get_recent_images(dir_name, page_index, -1, image_index, tabname) +def first_page_click(dir_name, page_index, image_index, tabname, date_from, date_to): + return get_recent_images(dir_name, 1, 0, image_index, tabname, date_from, date_to) -def next_page_click(dir_name, page_index, image_index, tabname): - return get_recent_images(dir_name, page_index, 1, image_index, tabname) +def end_page_click(dir_name, page_index, image_index, tabname, date_from, date_to): + return get_recent_images(dir_name, -1, 0, image_index, tabname, date_from, date_to) -def page_index_change(dir_name, page_index, image_index, tabname): - return get_recent_images(dir_name, page_index, 0, image_index, tabname) +def prev_page_click(dir_name, page_index, image_index, tabname, date_from, date_to): + return get_recent_images(dir_name, page_index, -1, image_index, tabname, date_from, date_to) -def show_image_info(num, image_path, filenames): - # print(f"select image {num}") - file = filenames[int(num)] - return file, num, os.path.join(image_path, file) +def next_page_click(dir_name, page_index, image_index, tabname, date_from, date_to): + return get_recent_images(dir_name, page_index, 1, image_index, tabname, date_from, date_to) + + +def page_index_change(dir_name, page_index, image_index, tabname, date_from, date_to): + return get_recent_images(dir_name, page_index, 0, image_index, tabname, date_from, date_to) -def delete_image(delete_num, tabname, dir_name, name, page_index, filenames, image_index): +def show_image_info(tabname_box, num, filenames): + # #print(f"select image {num}") + file = filenames[int(num)] + return file, num, file + +def delete_image(delete_num, tabname, name, page_index, filenames, image_index): if name == "": return filenames, delete_num else: @@ -81,21 +183,19 @@ def delete_image(delete_num, tabname, dir_name, name, page_index, filenames, ima new_file_list = [] for name in filenames: if i >= index and i < index + delete_num: - path = os.path.join(dir_name, name) - if os.path.exists(path): - print(f"Delete file {path}") - os.remove(path) - 
txt_file = os.path.splitext(path)[0] + ".txt" + if os.path.exists(name): + #print(f"Delete file {name}") + os.remove(name) + txt_file = os.path.splitext(name)[0] + ".txt" if os.path.exists(txt_file): os.remove(txt_file) else: - print(f"Not exists file {path}") + #print(f"Not exists file {name}") else: new_file_list.append(name) i += 1 return new_file_list, 1 - def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): if tabname == "txt2img": dir_name = opts.outdir_txt2img_samples @@ -107,16 +207,32 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): dir_name = d[0] for p in d[1:]: dir_name = os.path.join(dir_name, p) - with gr.Row(): - renew_page = gr.Button('Renew Page', elem_id=tabname + "_images_history_renew_page") - first_page = gr.Button('First Page') - prev_page = gr.Button('Prev Page') - page_index = gr.Number(value=1, label="Page Index") - next_page = gr.Button('Next Page') - end_page = gr.Button('End Page') - with gr.Row(elem_id=tabname + "_images_history"): + + f_list = os.listdir(dir_name) + sorted_flag = os.path.exists(os.path.join(dir_name, system_bak_path)) or len(f_list) == 0 + date_list, date_from, date_to = None, None, None + if sorted_flag: + #print(sorted_flag) + date_list = auto_sorting(dir_name) + date_to = date_list[-1] + date_from = date_list[-show_max_dates_num] if len(date_list) > show_max_dates_num else date_list[0] + + with gr.Column(visible=sorted_flag) as page_panel: with gr.Row(): + renew_page = gr.Button('Refresh', elem_id=tabname + "_images_history_renew_page", interactive=sorted_flag) + first_page = gr.Button('First Page') + prev_page = gr.Button('Prev Page') + page_index = gr.Number(value=1, label="Page Index") + next_page = gr.Button('Next Page') + end_page = gr.Button('End Page') + + with gr.Row(elem_id=tabname + "_images_history"): with gr.Column(scale=2): + with gr.Row(): + newest = gr.Button('Newest') + date_to = gr.Dropdown(choices=date_list, value=date_to, label="Date to") + date_from = gr.Dropdown(choices=date_list, value=date_from, label="Date from") + history_gallery = gr.Gallery(show_label=False, elem_id=tabname + "_images_history_gallery").style(grid=6) with gr.Row(): delete_num = gr.Number(value=1, interactive=True, label="number of images to delete consecutively next") @@ -128,22 +244,31 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): with gr.Row(): with gr.Column(): img_file_info = gr.Textbox(label="Generate Info", interactive=False) - img_file_name = gr.Textbox(label="File Name", interactive=False) - with gr.Row(): + img_file_name = gr.Textbox(value="", label="File Name", interactive=False) # hiden items + with gr.Row(visible=False): + img_path = gr.Textbox(dir_name) + tabname_box = gr.Textbox(tabname) + image_index = gr.Textbox(value=-1) + set_index = gr.Button('set_index', elem_id=tabname + "_images_history_set_index") + filenames = gr.State() + hidden = gr.Image(type="pil") + info1 = gr.Textbox() + info2 = gr.Textbox() + with gr.Column(visible=not sorted_flag) as init_warning: + with gr.Row(): + gr.Textbox("The system needs to archive the files according to the date. 
This requires changing the directory structure of the files", + label="Waring", + css="") + with gr.Row(): + sorted_button = gr.Button('Confirme') - img_path = gr.Textbox(dir_name.rstrip("/"), visible=False) - tabname_box = gr.Textbox(tabname, visible=False) - image_index = gr.Textbox(value=-1, visible=False) - set_index = gr.Button('set_index', elem_id=tabname + "_images_history_set_index", visible=False) - filenames = gr.State() - hidden = gr.Image(type="pil", visible=False) - info1 = gr.Textbox(visible=False) - info2 = gr.Textbox(visible=False) - + + + # turn pages - gallery_inputs = [img_path, page_index, image_index, tabname_box] - gallery_outputs = [history_gallery, page_index, filenames, img_file_name, hidden, img_file_name] + gallery_inputs = [img_path, page_index, image_index, tabname_box, date_from, date_to] + gallery_outputs = [history_gallery, page_index, filenames, img_file_name] first_page.click(first_page_click, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs) next_page.click(next_page_click, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs) @@ -154,15 +279,21 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): # page_index.change(page_index_change, inputs=[tabname_box, img_path, page_index], outputs=[history_gallery, page_index]) # other funcitons - set_index.click(show_image_info, _js="images_history_get_current_img", inputs=[tabname_box, img_path, filenames], outputs=[img_file_name, image_index, hidden]) + set_index.click(show_image_info, _js="images_history_get_current_img", inputs=[tabname_box, image_index, filenames], outputs=[img_file_name, image_index, hidden]) img_file_name.change(fn=None, _js="images_history_enable_del_buttons", inputs=None, outputs=None) - delete.click(delete_image, _js="images_history_delete", inputs=[delete_num, tabname_box, img_path, img_file_name, page_index, filenames, image_index], outputs=[filenames, delete_num]) + delete.click(delete_image, _js="images_history_delete", inputs=[delete_num, tabname_box, img_file_name, page_index, filenames, image_index], outputs=[filenames, delete_num]) hidden.change(fn=run_pnginfo, inputs=[hidden], outputs=[info1, img_file_info, info2]) - + date_to.change(date_to_change, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs + [date_from]) # pnginfo.click(fn=run_pnginfo, inputs=[hidden], outputs=[info1, img_file_info, info2]) switch_dict["fn"](pnginfo_send_to_txt2img, switch_dict["t2i"], img_file_info, 'switch_to_txt2img') switch_dict["fn"](pnginfo_send_to_img2img, switch_dict["i2i"], img_file_info, 'switch_to_img2img_img2img') + sorted_button.click(archive_images, inputs=[img_path], outputs=[init_warning, page_panel, date_to, date_from]) + newest.click(archive_images, inputs=[img_path], outputs=[init_warning, page_panel, date_to, date_from]) + + + + def create_history_tabs(gr, opts, run_pnginfo, switch_dict): with gr.Blocks(analytics_enabled=False) as images_history: -- cgit v1.2.1 From f62905fdf928b54aa76765e5cbde8d538d494e49 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Sun, 16 Oct 2022 21:22:38 +0800 Subject: images history speed up --- modules/images_history.py | 250 ++++++++++++++++++++++++---------------------- 1 file changed, 128 insertions(+), 122 deletions(-) (limited to 'modules') diff --git a/modules/images_history.py b/modules/images_history.py index 7fd75005..ae0b4e40 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -3,8 +3,10 @@ import shutil import time import hashlib 
import gradio -show_max_dates_num = 3 + system_bak_path = "webui_log_and_bak" +loads_files_num = 216 +num_of_imgs_per_page = 36 def is_valid_date(date): try: time.strptime(date, "%Y%m%d") @@ -53,38 +55,7 @@ def traverse_all_files(curr_path, image_list, all_type=False): image_list = traverse_all_files(file, image_list) return image_list -def get_recent_images(dir_name, page_index, step, image_index, tabname, date_from, date_to): - #print(f"turn_page {page_index}",date_from) - if date_from is None or date_from == "": - return None, 1, None, "" - image_list = [] - date_list = auto_sorting(dir_name) - page_index = int(page_index) - today = time.strftime("%Y%m%d",time.localtime(time.time())) - for date in date_list: - if date >= date_from and date <= date_to: - path = os.path.join(dir_name, date) - if date == today and not os.path.exists(path): - continue - image_list = traverse_all_files(path, image_list) - - image_list = sorted(image_list, key=lambda file: -os.path.getctime(file)) - num = 48 if tabname != "extras" else 12 - max_page_index = len(image_list) // num + 1 - page_index = max_page_index if page_index == -1 else page_index + step - page_index = 1 if page_index < 1 else page_index - page_index = max_page_index if page_index > max_page_index else page_index - idx_frm = (page_index - 1) * num - image_list = image_list[idx_frm:idx_frm + num] - image_index = int(image_index) - if image_index < 0 or image_index > len(image_list) - 1: - current_file = None - else: - current_file = image_list[image_index] - return image_list, page_index, image_list, "" - -def auto_sorting(dir_name): - #print(f"auto sorting") +def auto_sorting(dir_name): bak_path = os.path.join(dir_name, system_bak_path) if not os.path.exists(bak_path): os.mkdir(bak_path) @@ -126,102 +97,131 @@ def auto_sorting(dir_name): today = time.strftime("%Y%m%d",time.localtime(time.time())) if today not in date_list: date_list.append(today) - return sorted(date_list) + return sorted(date_list, reverse=True) -def archive_images(dir_name): +def archive_images(dir_name, date_to): date_list = auto_sorting(dir_name) - date_from = date_list[-show_max_dates_num] if len(date_list) > show_max_dates_num else date_list[0] + today = time.strftime("%Y%m%d",time.localtime(time.time())) + date_to = today if date_to is None or date_to == "" else date_to + filenames = [] + for date in date_list: + if date <= date_to: + path = os.path.join(dir_name, date) + if date == today and not os.path.exists(path): + continue + filenames = traverse_all_files(path, filenames) + if len(filenames) > loads_files_num: + break + filenames = sorted(filenames, key=lambda file: -os.path.getctime(file)) + _, image_list, _, visible_num = get_recent_images(1, 0, filenames) return ( gradio.update(visible=False), gradio.update(visible=True), - gradio.Dropdown.update(choices=date_list, value=date_list[-1]), - gradio.Dropdown.update(choices=date_list, value=date_from) + gradio.Dropdown.update(choices=date_list, value=date_to), + date, + filenames, + 1, + image_list, + "", + visible_num ) +def system_init(dir_name): + ret = [x for x in archive_images(dir_name, None)] + ret += [gradio.update(visible=False)] + return ret + +def newest_click(dir_name, date_to): + if date_to == "start": + return True, False, "start", None, None, 1, None, "" + else: + return archive_images(dir_name, time.strftime("%Y%m%d",time.localtime(time.time()))) -def date_to_change(dir_name, page_index, image_index, tabname, date_from, date_to): - #print("date_to", date_to) - date_list = auto_sorting(dir_name) - 
date_from_list = [date for date in date_list if date <= date_to] - date_from = date_from_list[0] if len(date_from_list) < show_max_dates_num else date_from_list[-show_max_dates_num] - image_list, page_index, image_list, _ =get_recent_images(dir_name, 1, 0, image_index, tabname, date_from, date_to) - return image_list, page_index, image_list, _, gradio.Dropdown.update(choices=date_from_list, value=date_from) - -def first_page_click(dir_name, page_index, image_index, tabname, date_from, date_to): - return get_recent_images(dir_name, 1, 0, image_index, tabname, date_from, date_to) - - -def end_page_click(dir_name, page_index, image_index, tabname, date_from, date_to): - return get_recent_images(dir_name, -1, 0, image_index, tabname, date_from, date_to) - - -def prev_page_click(dir_name, page_index, image_index, tabname, date_from, date_to): - return get_recent_images(dir_name, page_index, -1, image_index, tabname, date_from, date_to) - - -def next_page_click(dir_name, page_index, image_index, tabname, date_from, date_to): - return get_recent_images(dir_name, page_index, 1, image_index, tabname, date_from, date_to) - - -def page_index_change(dir_name, page_index, image_index, tabname, date_from, date_to): - return get_recent_images(dir_name, page_index, 0, image_index, tabname, date_from, date_to) - - -def show_image_info(tabname_box, num, filenames): - # #print(f"select image {num}") - file = filenames[int(num)] - return file, num, file - -def delete_image(delete_num, tabname, name, page_index, filenames, image_index): +def delete_image(delete_num, name, filenames, image_index, visible_num): if name == "": return filenames, delete_num else: delete_num = int(delete_num) + visible_num = int(visible_num) + image_index = int(image_index) index = list(filenames).index(name) i = 0 new_file_list = [] for name in filenames: if i >= index and i < index + delete_num: if os.path.exists(name): - #print(f"Delete file {name}") + if visible_num == image_index: + new_file_list.append(name) + continue + print(f"Delete file {name}") os.remove(name) + visible_num -= 1 txt_file = os.path.splitext(name)[0] + ".txt" if os.path.exists(txt_file): os.remove(txt_file) else: - #print(f"Not exists file {name}") + print(f"Not exists file {name}") else: new_file_list.append(name) i += 1 - return new_file_list, 1 + return new_file_list, 1, visible_num + +def get_recent_images(page_index, step, filenames): + page_index = int(page_index) + max_page_index = len(filenames) // num_of_imgs_per_page + 1 + page_index = max_page_index if page_index == -1 else page_index + step + page_index = 1 if page_index < 1 else page_index + page_index = max_page_index if page_index > max_page_index else page_index + idx_frm = (page_index - 1) * num_of_imgs_per_page + image_list = filenames[idx_frm:idx_frm + num_of_imgs_per_page] + length = len(filenames) + visible_num = num_of_imgs_per_page if idx_frm + num_of_imgs_per_page <= length else length % num_of_imgs_per_page + visible_num = num_of_imgs_per_page if visible_num == 0 else visible_num + return page_index, image_list, "", visible_num + +def first_page_click(page_index, filenames): + return get_recent_images(1, 0, filenames) + +def end_page_click(page_index, filenames): + return get_recent_images(-1, 0, filenames) + +def prev_page_click(page_index, filenames): + return get_recent_images(page_index, -1, filenames) + +def next_page_click(page_index, filenames): + return get_recent_images(page_index, 1, filenames) + +def page_index_change(page_index, filenames): + return 
get_recent_images(page_index, 0, filenames) + +def show_image_info(tabname_box, num, page_index, filenames): + file = filenames[int(num) + int((page_index - 1) * num_of_imgs_per_page)] + return file, num, file def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): - if opts.outdir_samples != "": - dir_name = opts.outdir_samples - elif tabname == "txt2img": + if tabname == "txt2img": dir_name = opts.outdir_txt2img_samples elif tabname == "img2img": dir_name = opts.outdir_img2img_samples elif tabname == "extras": dir_name = opts.outdir_extras_samples + elif tabname == "saved": + dir_name = opts.outdir_save + if not os.path.exists(dir_name): + os.makedirs(dir_name) d = dir_name.split("/") - dir_name = "/" if dir_name.startswith("/") else d[0] + dir_name = d[0] for p in d[1:]: dir_name = os.path.join(dir_name, p) f_list = os.listdir(dir_name) sorted_flag = os.path.exists(os.path.join(dir_name, system_bak_path)) or len(f_list) == 0 date_list, date_from, date_to = None, None, None - if sorted_flag: - #print(sorted_flag) - date_list = auto_sorting(dir_name) - date_to = date_list[-1] - date_from = date_list[-show_max_dates_num] if len(date_list) > show_max_dates_num else date_list[0] with gr.Column(visible=sorted_flag) as page_panel: with gr.Row(): - renew_page = gr.Button('Refresh', elem_id=tabname + "_images_history_renew_page", interactive=sorted_flag) + #renew_page = gr.Button('Refresh') first_page = gr.Button('First Page') prev_page = gr.Button('Prev Page') page_index = gr.Number(value=1, label="Page Index") @@ -231,9 +231,9 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): with gr.Row(elem_id=tabname + "_images_history"): with gr.Column(scale=2): with gr.Row(): - newest = gr.Button('Newest') - date_to = gr.Dropdown(choices=date_list, value=date_to, label="Date to") - date_from = gr.Dropdown(choices=date_list, value=date_from, label="Date from") + newest = gr.Button('Refresh', elem_id=tabname + "_images_history_start") + date_from = gr.Textbox(label="Date from", interactive=False) + date_to = gr.Dropdown(value="start" if not sorted_flag else None, label="Date to") history_gallery = gr.Gallery(show_label=False, elem_id=tabname + "_images_history_gallery").style(grid=6) with gr.Row(): @@ -247,66 +247,72 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): with gr.Column(): img_file_info = gr.Textbox(label="Generate Info", interactive=False) img_file_name = gr.Textbox(value="", label="File Name", interactive=False) + # hiden items - with gr.Row(visible=False): + with gr.Row(visible=False): + visible_img_num = gr.Number() img_path = gr.Textbox(dir_name) tabname_box = gr.Textbox(tabname) image_index = gr.Textbox(value=-1) set_index = gr.Button('set_index', elem_id=tabname + "_images_history_set_index") filenames = gr.State() + all_images_list = gr.State() hidden = gr.Image(type="pil") info1 = gr.Textbox() info2 = gr.Textbox() + with gr.Column(visible=not sorted_flag) as init_warning: with gr.Row(): - gr.Textbox("The system needs to archive the files according to the date. This requires changing the directory structure of the files", - label="Waring", - css="") + warning = gr.Textbox( + label="Waring", + value=f"The system needs to archive the files according to the date. 
This requires changing the directory structure of the files.If you have doubts about this operation, you can first back up the files in the '{dir_name}' directory" + ) + warning.style(height=100, width=50) with gr.Row(): sorted_button = gr.Button('Confirme') - - + change_date_output = [init_warning, page_panel, date_to, date_from, filenames, page_index, history_gallery, img_file_name, visible_img_num] + sorted_button.click(system_init, inputs=[img_path], outputs=change_date_output + [sorted_button]) + newest.click(newest_click, inputs=[img_path, date_to], outputs=change_date_output) + date_to.change(archive_images, inputs=[img_path, date_to], outputs=change_date_output) + date_to.change(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") + newest.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") + + delete.click(delete_image, inputs=[delete_num, img_file_name, filenames, image_index, visible_img_num], outputs=[filenames, delete_num, visible_img_num]) + delete.click(fn=None, _js="images_history_delete", inputs=[delete_num, tabname_box, image_index], outputs=None) + # turn pages - gallery_inputs = [img_path, page_index, image_index, tabname_box, date_from, date_to] - gallery_outputs = [history_gallery, page_index, filenames, img_file_name] + gallery_inputs = [page_index, filenames] + gallery_outputs = [page_index, history_gallery, img_file_name, visible_img_num] + + first_page.click(first_page_click, inputs=gallery_inputs, outputs=gallery_outputs) + next_page.click(next_page_click, inputs=gallery_inputs, outputs=gallery_outputs) + prev_page.click(prev_page_click, inputs=gallery_inputs, outputs=gallery_outputs) + end_page.click(end_page_click, inputs=gallery_inputs, outputs=gallery_outputs) + page_index.submit(page_index_change, inputs=gallery_inputs, outputs=gallery_outputs) - first_page.click(first_page_click, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs) - next_page.click(next_page_click, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs) - prev_page.click(prev_page_click, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs) - end_page.click(end_page_click, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs) - page_index.submit(page_index_change, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs) - renew_page.click(page_index_change, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs) - # page_index.change(page_index_change, inputs=[tabname_box, img_path, page_index], outputs=[history_gallery, page_index]) + first_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") + next_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") + prev_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") + end_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") + page_index.submit(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") # other funcitons - set_index.click(show_image_info, _js="images_history_get_current_img", inputs=[tabname_box, image_index, filenames], outputs=[img_file_name, image_index, hidden]) - img_file_name.change(fn=None, _js="images_history_enable_del_buttons", inputs=None, outputs=None) - delete.click(delete_image, _js="images_history_delete", inputs=[delete_num, tabname_box, img_file_name, page_index, 
filenames, image_index], outputs=[filenames, delete_num]) + set_index.click(show_image_info, _js="images_history_get_current_img", inputs=[tabname_box, image_index, page_index, filenames], outputs=[img_file_name, image_index, hidden]) + img_file_name.change(fn=None, _js="images_history_enable_del_buttons", inputs=None, outputs=None) hidden.change(fn=run_pnginfo, inputs=[hidden], outputs=[info1, img_file_info, info2]) - date_to.change(date_to_change, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs + [date_from]) - # pnginfo.click(fn=run_pnginfo, inputs=[hidden], outputs=[info1, img_file_info, info2]) + switch_dict["fn"](pnginfo_send_to_txt2img, switch_dict["t2i"], img_file_info, 'switch_to_txt2img') switch_dict["fn"](pnginfo_send_to_img2img, switch_dict["i2i"], img_file_info, 'switch_to_img2img_img2img') - sorted_button.click(archive_images, inputs=[img_path], outputs=[init_warning, page_panel, date_to, date_from]) - newest.click(archive_images, inputs=[img_path], outputs=[init_warning, page_panel, date_to, date_from]) - - - def create_history_tabs(gr, opts, run_pnginfo, switch_dict): with gr.Blocks(analytics_enabled=False) as images_history: with gr.Tabs() as tabs: - with gr.Tab("txt2img history"): - with gr.Blocks(analytics_enabled=False) as images_history_txt2img: - show_images_history(gr, opts, "txt2img", run_pnginfo, switch_dict) - with gr.Tab("img2img history"): - with gr.Blocks(analytics_enabled=False) as images_history_img2img: - show_images_history(gr, opts, "img2img", run_pnginfo, switch_dict) - with gr.Tab("extras history"): - with gr.Blocks(analytics_enabled=False) as images_history_img2img: - show_images_history(gr, opts, "extras", run_pnginfo, switch_dict) + for tab in ["saved", "txt2img", "img2img", "extras"]: + with gr.Tab(tab): + with gr.Blocks(analytics_enabled=False) as images_history_img2img: + show_images_history(gr, opts, tab, run_pnginfo, switch_dict) return images_history -- cgit v1.2.1 From a4de699e3c235d83b5a957d08779cb41cb0781bc Mon Sep 17 00:00:00 2001 From: yfszzx Date: Sun, 16 Oct 2022 22:37:12 +0800 Subject: Images history speed up --- modules/images_history.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/images_history.py b/modules/images_history.py index ae0b4e40..94bd16a8 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -153,6 +153,7 @@ def delete_image(delete_num, name, filenames, image_index, visible_num): if os.path.exists(name): if visible_num == image_index: new_file_list.append(name) + i += 1 continue print(f"Delete file {name}") os.remove(name) @@ -221,7 +222,7 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): with gr.Column(visible=sorted_flag) as page_panel: with gr.Row(): - #renew_page = gr.Button('Refresh') + renew_page = gr.Button('Refresh page', elem_id=tabname + "_images_history_renew_page") first_page = gr.Button('First Page') prev_page = gr.Button('Prev Page') page_index = gr.Number(value=1, label="Page Index") @@ -231,7 +232,7 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): with gr.Row(elem_id=tabname + "_images_history"): with gr.Column(scale=2): with gr.Row(): - newest = gr.Button('Refresh', elem_id=tabname + "_images_history_start") + newest = gr.Button('Reload', elem_id=tabname + "_images_history_start") date_from = gr.Textbox(label="Date from", interactive=False) date_to = gr.Dropdown(value="start" if not sorted_flag else None, label="Date to") @@ -291,12 +292,14 @@ def 
show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): prev_page.click(prev_page_click, inputs=gallery_inputs, outputs=gallery_outputs) end_page.click(end_page_click, inputs=gallery_inputs, outputs=gallery_outputs) page_index.submit(page_index_change, inputs=gallery_inputs, outputs=gallery_outputs) + renew_page.click(page_index_change, inputs=gallery_inputs, outputs=gallery_outputs) first_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") next_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") prev_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") end_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") page_index.submit(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") + renew_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") # other funcitons set_index.click(show_image_info, _js="images_history_get_current_img", inputs=[tabname_box, image_index, page_index, filenames], outputs=[img_file_name, image_index, hidden]) -- cgit v1.2.1 From 9d702b16f01795c3af900e0ebd70faf4b25200f6 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Mon, 17 Oct 2022 16:11:03 +0800 Subject: fix two little bug --- modules/images_history.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/images_history.py b/modules/images_history.py index 23045df1..1ae168ca 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -133,7 +133,7 @@ def archive_images(dir_name, date_to): date = sort_array[loads_num][2] filenames = [x[1] for x in sort_array] else: - date = sort_array[loads_num][2] + date = sort_array[-1][2] filenames = [x[1] for x in sort_array] filenames = [x[1] for x in sort_array if x[2]>= date] _, image_list, _, visible_num = get_recent_images(1, 0, filenames) @@ -334,6 +334,6 @@ def create_history_tabs(gr, sys_opts, run_pnginfo, switch_dict): with gr.Tab(tab): with gr.Blocks(analytics_enabled=False) as images_history_img2img: show_images_history(gr, opts, tab, run_pnginfo, switch_dict) - gradio.Checkbox(opts.images_history_reconstruct_directory, elem_id="images_history_reconstruct_directory") #, visible=False) + gradio.Checkbox(opts.images_history_reconstruct_directory, elem_id="images_history_reconstruct_directory", visible=False) return images_history -- cgit v1.2.1 From c408a0b41cfffde184cad35b2d97346342947d83 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Mon, 17 Oct 2022 22:28:43 +0800 Subject: fix two bug --- modules/images_history.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/images_history.py b/modules/images_history.py index 1ae168ca..10e5b970 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -181,7 +181,8 @@ def delete_image(delete_num, name, filenames, image_index, visible_num): return new_file_list, 1, visible_num def save_image(file_name): - shutil.copy2(file_name, opts.outdir_save) + if file_name is not None and os.path.exists(file_name): + shutil.copy2(file_name, opts.outdir_save) def get_recent_images(page_index, step, filenames): page_index = int(page_index) @@ -327,7 +328,6 @@ def create_history_tabs(gr, sys_opts, run_pnginfo, switch_dict): opts = sys_opts loads_files_num = int(opts.images_history_num_per_page) num_of_imgs_per_page = int(opts.images_history_num_per_page * opts.images_history_pages_num) - backup_flag = opts.images_history_backup 
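# A minimal model of the delete_image bookkeeping fixed above (hypothetical
# helper, not repo code): the loop keeps one running index `i`, and the
# `i += 1` added before `continue` is what keeps it in step -- without it,
# every file after a skipped entry is tested against the wrong slot.
def delete_range_sketch(filenames, start, count, keep=()):
    kept, i = [], 0
    for name in filenames:
        if start <= i < start + count and name not in keep:
            pass  # the real code removes the file and its sidecar .txt here
        else:
            kept.append(name)
        i += 1  # advance on every branch; an early `continue` must not skip this
    return kept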
with gr.Blocks(analytics_enabled=False) as images_history: with gr.Tabs() as tabs: for tab in ["txt2img", "img2img", "extras", "saved"]: -- cgit v1.2.1 From 2272cf2f35fafd5cd486bfb4ee89df5bbc625b97 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Mon, 17 Oct 2022 23:04:42 +0800 Subject: fix two bug --- modules/images_history.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/images_history.py b/modules/images_history.py index 10e5b970..1c1790a4 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -133,7 +133,7 @@ def archive_images(dir_name, date_to): date = sort_array[loads_num][2] filenames = [x[1] for x in sort_array] else: - date = sort_array[-1][2] + date = None if len(sort_array) == 0 else sort_array[-1][2] filenames = [x[1] for x in sort_array] filenames = [x[1] for x in sort_array if x[2]>= date] _, image_list, _, visible_num = get_recent_images(1, 0, filenames) -- cgit v1.2.1 From 2b5b62e768d892773a7ec1d5e8d8cea23aae1254 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Mon, 17 Oct 2022 23:14:03 +0800 Subject: fix two bug --- modules/images_history.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/images_history.py b/modules/images_history.py index 1c1790a4..20324557 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -44,7 +44,7 @@ def traverse_all_files(curr_path, image_list, all_type=False): return image_list for file in f_list: file = os.path.join(curr_path, file) - if (not all_type) and file[-4:] == ".txt": + if (not all_type) and (file[-4:] == ".txt" or file[-4:] == ".csv"): pass elif os.path.isfile(file) and file[-10:].rfind(".") > 0: image_list.append(file) @@ -182,7 +182,7 @@ def delete_image(delete_num, name, filenames, image_index, visible_num): def save_image(file_name): if file_name is not None and os.path.exists(file_name): - shutil.copy2(file_name, opts.outdir_save) + shutil.copy(file_name, opts.outdir_save) def get_recent_images(page_index, step, filenames): page_index = int(page_index) -- cgit v1.2.1 From eb299527b1e5d1f83a14641647fca72e8fb305ac Mon Sep 17 00:00:00 2001 From: yfszzx Date: Tue, 18 Oct 2022 20:14:11 +0800 Subject: Image browser --- modules/images_history.py | 227 ++++++++++++++++++++++++++++++---------------- modules/shared.py | 7 +- modules/ui.py | 2 +- 3 files changed, 154 insertions(+), 82 deletions(-) (limited to 'modules') diff --git a/modules/images_history.py b/modules/images_history.py index 20324557..d56f3a25 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -4,6 +4,7 @@ import time import hashlib import gradio system_bak_path = "webui_log_and_bak" +browser_tabname = "custom" def is_valid_date(date): try: time.strptime(date, "%Y%m%d") @@ -99,13 +100,15 @@ def auto_sorting(dir_name): date_list.append(today) return sorted(date_list, reverse=True) -def archive_images(dir_name, date_to): +def archive_images(dir_name, date_to): + filenames = [] loads_num =int(opts.images_history_num_per_page * opts.images_history_pages_num) + today = time.strftime("%Y%m%d",time.localtime(time.time())) + date_to = today if date_to is None or date_to == "" else date_to + date_to_bak = date_to if opts.images_history_reconstruct_directory: - date_list = auto_sorting(dir_name) - today = time.strftime("%Y%m%d",time.localtime(time.time())) - date_to = today if date_to is None or date_to == "" else date_to + date_list = auto_sorting(dir_name) for date in date_list: if date <= date_to: path = os.path.join(dir_name, 
date) @@ -120,7 +123,7 @@ def archive_images(dir_name, date_to): tmparray = [(os.path.getmtime(file), file) for file in filenames ] date_stamp = time.mktime(time.strptime(date_to, "%Y%m%d")) + 86400 filenames = [] - date_list = {} + date_list = {date_to:None} date = time.strftime("%Y%m%d",time.localtime(time.time())) for t, f in tmparray: date = time.strftime("%Y%m%d",time.localtime(t)) @@ -133,22 +136,29 @@ def archive_images(dir_name, date_to): date = sort_array[loads_num][2] filenames = [x[1] for x in sort_array] else: - date = None if len(sort_array) == 0 else sort_array[-1][2] + date = date_to if len(sort_array) == 0 else sort_array[-1][2] filenames = [x[1] for x in sort_array] - filenames = [x[1] for x in sort_array if x[2]>= date] - _, image_list, _, visible_num = get_recent_images(1, 0, filenames) + filenames = [x[1] for x in sort_array if x[2]>= date] + num = len(filenames) + last_date_from = date_to_bak if num == 0 else time.strftime("%Y%m%d", time.localtime(time.mktime(time.strptime(date, "%Y%m%d")) - 1000)) + date = date[:4] + "-" + date[4:6] + "-" + date[6:8] + date_to_bak = date_to_bak[:4] + "-" + date_to_bak[4:6] + "-" + date_to_bak[6:8] + load_info = f"Loaded {(num + 1) // opts.images_history_pages_num} pades, {num} images, during {date} - {date_to_bak}" + _, image_list, _, _, visible_num = get_recent_images(1, 0, filenames) return ( gradio.Dropdown.update(choices=date_list, value=date_to), - date, + load_info, filenames, 1, image_list, "", - visible_num + "", + visible_num, + last_date_from ) -def newest_click(dir_name, date_to): - return archive_images(dir_name, time.strftime("%Y%m%d",time.localtime(time.time()))) + + def delete_image(delete_num, name, filenames, image_index, visible_num): if name == "": @@ -196,7 +206,29 @@ def get_recent_images(page_index, step, filenames): length = len(filenames) visible_num = num_of_imgs_per_page if idx_frm + num_of_imgs_per_page <= length else length % num_of_imgs_per_page visible_num = num_of_imgs_per_page if visible_num == 0 else visible_num - return page_index, image_list, "", visible_num + return page_index, image_list, "", "", visible_num + +def newest_click(date_to): + if date_to is None: + return time.strftime("%Y%m%d",time.localtime(time.time())), [] + else: + return None, [] +def forward_click(last_date_from, date_to_recorder): + if len(date_to_recorder) == 0: + return None, [] + if last_date_from == date_to_recorder[-1]: + date_to_recorder = date_to_recorder[:-1] + if len(date_to_recorder) == 0: + return None, [] + return date_to_recorder[-1], date_to_recorder[:-1] + +def backward_click(last_date_from, date_to_recorder): + if last_date_from is None or last_date_from == "": + return time.strftime("%Y%m%d",time.localtime(time.time())), [] + if len(date_to_recorder) == 0 or last_date_from != date_to_recorder[-1]: + date_to_recorder.append(last_date_from) + return last_date_from, date_to_recorder + def first_page_click(page_index, filenames): return get_recent_images(1, 0, filenames) @@ -214,13 +246,33 @@ def page_index_change(page_index, filenames): return get_recent_images(page_index, 0, filenames) def show_image_info(tabname_box, num, page_index, filenames): - file = filenames[int(num) + int((page_index - 1) * int(opts.images_history_num_per_page))] - return file, num, file + file = filenames[int(num) + int((page_index - 1) * int(opts.images_history_num_per_page))] + tm = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(os.path.getmtime(file))) + return file, tm, num, file def enable_page_buttons(): return 
gradio.update(visible=True) +def change_dir(img_dir, date_to): + warning = None + try: + if os.path.exists(img_dir): + try: + f = os.listdir(img_dir) + except: + warning = f"'{img_dir} is not a directory" + else: + warning = "The directory is not exist" + except: + warning = "The format of the directory is incorrect" + if warning is None: + today = time.strftime("%Y%m%d",time.localtime(time.time())) + return gradio.update(visible=False), gradio.update(visible=True), None, None if date_to != today else today + else: + return gradio.update(visible=True), gradio.update(visible=False), warning, date_to + def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): + custom_dir = False if tabname == "txt2img": dir_name = opts.outdir_txt2img_samples elif tabname == "img2img": @@ -229,69 +281,85 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): dir_name = opts.outdir_extras_samples elif tabname == "saved": dir_name = opts.outdir_save + else: + custom_dir = True + dir_name = None + + if not custom_dir: + d = dir_name.split("/") + dir_name = d[0] + for p in d[1:]: + dir_name = os.path.join(dir_name, p) + if not os.path.exists(dir_name): + os.makedirs(dir_name) - d = dir_name.split("/") - dir_name = d[0] - for p in d[1:]: - dir_name = os.path.join(dir_name, p) - if not os.path.exists(dir_name): - os.makedirs(dir_name) - - with gr.Column() as page_panel: - with gr.Row(visible=False) as turn_page_buttons: - renew_page = gr.Button('Refresh page', elem_id=tabname + "_images_history_renew_page") - first_page = gr.Button('First Page') - prev_page = gr.Button('Prev Page') - page_index = gr.Number(value=1, label="Page Index") - next_page = gr.Button('Next Page') - end_page = gr.Button('End Page') - - with gr.Row(elem_id=tabname + "_images_history"): - with gr.Column(scale=2): - with gr.Row(): - newest = gr.Button('Reload', elem_id=tabname + "_images_history_start") - date_from = gr.Textbox(label="Date from", interactive=False) - date_to = gr.Dropdown(label="Date to") - - history_gallery = gr.Gallery(show_label=False, elem_id=tabname + "_images_history_gallery").style(grid=6) - with gr.Row(): - delete_num = gr.Number(value=1, interactive=True, label="number of images to delete consecutively next") - delete = gr.Button('Delete', elem_id=tabname + "_images_history_del_button") - - with gr.Column(): - with gr.Row(): - if tabname != "saved": - save_btn = gr.Button('Save') - pnginfo_send_to_txt2img = gr.Button('Send to txt2img') - pnginfo_send_to_img2img = gr.Button('Send to img2img') - with gr.Row(): - with gr.Column(): - img_file_info = gr.Textbox(label="Generate Info", interactive=False) - img_file_name = gr.Textbox(value="", label="File Name", interactive=False) + with gr.Column() as page_panel: + with gr.Row(): + img_path = gr.Textbox(dir_name, label="Images directory", placeholder="Input images directory") + with gr.Row(visible=False) as warning: + warning_box = gr.Textbox("Message", interactive=False) + with gr.Row(visible=not custom_dir, elem_id=tabname + "_images_history") as main_panel: + with gr.Column(scale=2): + with gr.Row(): + backward = gr.Button('Backward') + date_to = gr.Dropdown(label="Date to") + forward = gr.Button('Forward') + newest = gr.Button('Reload', elem_id=tabname + "_images_history_start") + with gr.Row(): + load_info = gr.Textbox(show_label=False, interactive=False) + with gr.Row(visible=False) as turn_page_buttons: + renew_page = gr.Button('Refresh page', elem_id=tabname + "_images_history_renew_page") + first_page = gr.Button('First Page') + 
prev_page = gr.Button('Prev Page') + page_index = gr.Number(value=1, label="Page Index") + next_page = gr.Button('Next Page') + end_page = gr.Button('End Page') + + history_gallery = gr.Gallery(show_label=False, elem_id=tabname + "_images_history_gallery").style(grid=opts.images_history_grid_num) + with gr.Row(): + delete_num = gr.Number(value=1, interactive=True, label="number of images to delete consecutively next") + delete = gr.Button('Delete', elem_id=tabname + "_images_history_del_button") - # hiden items - with gr.Row(visible=False): - visible_img_num = gr.Number() - img_path = gr.Textbox(dir_name) - tabname_box = gr.Textbox(tabname) - image_index = gr.Textbox(value=-1) - set_index = gr.Button('set_index', elem_id=tabname + "_images_history_set_index") - filenames = gr.State() - all_images_list = gr.State() - hidden = gr.Image(type="pil") - info1 = gr.Textbox() - info2 = gr.Textbox() + with gr.Column(): + with gr.Row(): + if tabname != "saved": + save_btn = gr.Button('Save') + pnginfo_send_to_txt2img = gr.Button('Send to txt2img') + pnginfo_send_to_img2img = gr.Button('Send to img2img') + with gr.Row(): + with gr.Column(): + img_file_info = gr.Textbox(label="Generate Info", interactive=False) + img_file_name = gr.Textbox(value="", label="File Name", interactive=False) + img_file_time= gr.Textbox(value="", label="Create Time", interactive=False) - + + # hiden items + with gr.Row(): #visible=False): + visible_img_num = gr.Number() + date_to_recorder = gr.State([]) + last_date_from = gr.Textbox() + tabname_box = gr.Textbox(tabname) + image_index = gr.Textbox(value=-1) + set_index = gr.Button('set_index', elem_id=tabname + "_images_history_set_index") + filenames = gr.State() + all_images_list = gr.State() + hidden = gr.Image(type="pil") + info1 = gr.Textbox() + info2 = gr.Textbox() + + img_path.submit(change_dir, inputs=[img_path, date_to], outputs=[warning, main_panel, warning_box, date_to]) #change date - change_date_output = [date_to, date_from, filenames, page_index, history_gallery, img_file_name, visible_img_num] - newest.click(newest_click, inputs=[img_path, date_to], outputs=change_date_output) - date_to.change(archive_images, inputs=[img_path, date_to], outputs=change_date_output) - newest.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") - date_to.change(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") - date_to.change(enable_page_buttons, inputs=None, outputs=[turn_page_buttons]) - newest.click(enable_page_buttons, inputs=None, outputs=[turn_page_buttons]) + change_date_output = [date_to, load_info, filenames, page_index, history_gallery, img_file_name, img_file_time, visible_img_num, last_date_from] + + date_to.change(archive_images, inputs=[img_path, date_to], outputs=change_date_output) + date_to.change(enable_page_buttons, inputs=None, outputs=[turn_page_buttons]) + date_to.change(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") + + newest.click(newest_click, inputs=[date_to], outputs=[date_to, date_to_recorder]) + forward.click(forward_click, inputs=[last_date_from, date_to_recorder], outputs=[date_to, date_to_recorder]) + backward.click(backward_click, inputs=[last_date_from, date_to_recorder], outputs=[date_to, date_to_recorder]) + #delete delete.click(delete_image, inputs=[delete_num, img_file_name, filenames, image_index, visible_img_num], outputs=[filenames, delete_num, visible_img_num]) @@ -301,7 +369,7 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): 
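# The page-turn wiring below binds every button twice: one .click() runs the
# Python callback that recomputes the gallery, and a second .click() with
# fn=None runs only the _js hook on the client. A self-contained sketch of
# the pattern (gradio 3.x `_js` keyword assumed; the inline JS is illustrative):
import gradio as gr

def next_page_sketch(page_index):
    return int(page_index) + 1

with gr.Blocks() as demo:
    page = gr.Number(value=1, label="Page Index")
    btn = gr.Button("Next Page")
    btn.click(next_page_sketch, inputs=[page], outputs=[page])  # server-side update
    btn.click(fn=None, inputs=[page], outputs=None,
              _js="(i) => [i]")  # client-side hook, fired by the same click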
#turn page gallery_inputs = [page_index, filenames] - gallery_outputs = [page_index, history_gallery, img_file_name, visible_img_num] + gallery_outputs = [page_index, history_gallery, img_file_name, img_file_time, visible_img_num] first_page.click(first_page_click, inputs=gallery_inputs, outputs=gallery_outputs) next_page.click(next_page_click, inputs=gallery_inputs, outputs=gallery_outputs) prev_page.click(prev_page_click, inputs=gallery_inputs, outputs=gallery_outputs) @@ -317,12 +385,14 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): renew_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") # other funcitons - set_index.click(show_image_info, _js="images_history_get_current_img", inputs=[tabname_box, image_index, page_index, filenames], outputs=[img_file_name, image_index, hidden]) + set_index.click(show_image_info, _js="images_history_get_current_img", inputs=[tabname_box, image_index, page_index, filenames], outputs=[img_file_name, img_file_time, image_index, hidden]) img_file_name.change(fn=None, _js="images_history_enable_del_buttons", inputs=None, outputs=None) hidden.change(fn=run_pnginfo, inputs=[hidden], outputs=[info1, img_file_info, info2]) switch_dict["fn"](pnginfo_send_to_txt2img, switch_dict["t2i"], img_file_info, 'switch_to_txt2img') switch_dict["fn"](pnginfo_send_to_img2img, switch_dict["i2i"], img_file_info, 'switch_to_img2img_img2img') + + def create_history_tabs(gr, sys_opts, run_pnginfo, switch_dict): global opts; opts = sys_opts @@ -330,10 +400,11 @@ def create_history_tabs(gr, sys_opts, run_pnginfo, switch_dict): num_of_imgs_per_page = int(opts.images_history_num_per_page * opts.images_history_pages_num) with gr.Blocks(analytics_enabled=False) as images_history: with gr.Tabs() as tabs: - for tab in ["txt2img", "img2img", "extras", "saved"]: + for tab in [browser_tabname, "txt2img", "img2img", "extras", "saved"]: with gr.Tab(tab): - with gr.Blocks(analytics_enabled=False) as images_history_img2img: + with gr.Blocks(analytics_enabled=False) : show_images_history(gr, opts, tab, run_pnginfo, switch_dict) - gradio.Checkbox(opts.images_history_reconstruct_directory, elem_id="images_history_reconstruct_directory", visible=False) + #gradio.Checkbox(opts.images_history_reconstruct_directory, elem_id="images_history_reconstruct_directory", visible=False) + gradio.Checkbox(opts.images_history_reconstruct_directory, elem_id="images_history_finish_render", visible=False) return images_history diff --git a/modules/shared.py b/modules/shared.py index c2ea4186..1811018d 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -309,10 +309,11 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), })) -options_templates.update(options_section(('images-history', "Images history"), { - "images_history_reconstruct_directory": OptionInfo(False, "Reconstruct output directory structure.This can greatly improve the speed of loading , but will change the original output directory structure"), +options_templates.update(options_section(('images-history', "Images Browser"), { + #"images_history_reconstruct_directory": OptionInfo(False, "Reconstruct output directory structure.This can greatly improve the speed of loading , but will change the original output directory structure"), "images_history_num_per_page": OptionInfo(36, "Number of pictures displayed on each page"), - "images_history_pages_num": OptionInfo(6, 
"Maximum number of pages per load "), + "images_history_pages_num": OptionInfo(6, "Minimum number of pages per load "), + "images_history_grid_num": OptionInfo(6, "Number of grids in each row"), })) diff --git a/modules/ui.py b/modules/ui.py index 43dc88fc..85abac4d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1548,7 +1548,7 @@ Requested path was: {f} (img2img_interface, "img2img", "img2img"), (extras_interface, "Extras", "extras"), (pnginfo_interface, "PNG Info", "pnginfo"), - (images_history, "History", "images_history"), + (images_history, "Image Browser", "images_history"), (modelmerger_interface, "Checkpoint Merger", "modelmerger"), (train_interface, "Train", "ti"), (settings_interface, "Settings", "settings"), -- cgit v1.2.1 From b7e78ef692fe912916de6e54f6e2521b000d650c Mon Sep 17 00:00:00 2001 From: yfszzx Date: Tue, 18 Oct 2022 22:21:54 +0800 Subject: Image browser improve --- modules/images_history.py | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) (limited to 'modules') diff --git a/modules/images_history.py b/modules/images_history.py index d56f3a25..a40cdc0e 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -100,14 +100,15 @@ def auto_sorting(dir_name): date_list.append(today) return sorted(date_list, reverse=True) -def archive_images(dir_name, date_to): - +def archive_images(dir_name, date_to): filenames = [] - loads_num =int(opts.images_history_num_per_page * opts.images_history_pages_num) + batch_size =int(opts.images_history_num_per_page * opts.images_history_pages_num) + if batch_size <= 0: + batch_size = opts.images_history_num_per_page * 6 today = time.strftime("%Y%m%d",time.localtime(time.time())) date_to = today if date_to is None or date_to == "" else date_to date_to_bak = date_to - if opts.images_history_reconstruct_directory: + if False: #opts.images_history_reconstruct_directory: date_list = auto_sorting(dir_name) for date in date_list: if date <= date_to: @@ -115,11 +116,13 @@ def archive_images(dir_name, date_to): if date == today and not os.path.exists(path): continue filenames = traverse_all_files(path, filenames) - if len(filenames) > loads_num: + if len(filenames) > batch_size: break filenames = sorted(filenames, key=lambda file: -os.path.getmtime(file)) else: - filenames = traverse_all_files(dir_name, filenames) + filenames = traverse_all_files(dir_name, filenames) + total_num = len(filenames) + batch_count = len(filenames) + 1 // batch_size + 1 tmparray = [(os.path.getmtime(file), file) for file in filenames ] date_stamp = time.mktime(time.strptime(date_to, "%Y%m%d")) + 86400 filenames = [] @@ -132,8 +135,8 @@ def archive_images(dir_name, date_to): filenames.append((t, f ,date)) date_list = sorted(list(date_list.keys()), reverse=True) sort_array = sorted(filenames, key=lambda x:-x[0]) - if len(sort_array) > loads_num: - date = sort_array[loads_num][2] + if len(sort_array) > batch_size: + date = sort_array[batch_size][2] filenames = [x[1] for x in sort_array] else: date = date_to if len(sort_array) == 0 else sort_array[-1][2] @@ -141,9 +144,9 @@ def archive_images(dir_name, date_to): filenames = [x[1] for x in sort_array if x[2]>= date] num = len(filenames) last_date_from = date_to_bak if num == 0 else time.strftime("%Y%m%d", time.localtime(time.mktime(time.strptime(date, "%Y%m%d")) - 1000)) - date = date[:4] + "-" + date[4:6] + "-" + date[6:8] - date_to_bak = date_to_bak[:4] + "-" + date_to_bak[4:6] + "-" + date_to_bak[6:8] - load_info = f"Loaded {(num + 1) // 
opts.images_history_pages_num} pades, {num} images, during {date} - {date_to_bak}" + date = date[:4] + "/" + date[4:6] + "/" + date[6:8] + date_to_bak = date_to_bak[:4] + "/" + date_to_bak[4:6] + "/" + date_to_bak[6:8] + load_info = f"{total_num} images in this directory. Loaded {num} images during {date} - {date_to_bak}, divided into {int((num + 1) // opts.images_history_num_per_page + 1)} pages" _, image_list, _, _, visible_num = get_recent_images(1, 0, filenames) return ( gradio.Dropdown.update(choices=date_list, value=date_to), @@ -154,12 +157,10 @@ def archive_images(dir_name, date_to): "", "", visible_num, - last_date_from + last_date_from, + #gradio.update(visible=batch_count > 1) ) - - - def delete_image(delete_num, name, filenames, image_index, visible_num): if name == "": return filenames, delete_num @@ -295,16 +296,16 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): with gr.Column() as page_panel: with gr.Row(): - img_path = gr.Textbox(dir_name, label="Images directory", placeholder="Input images directory") + img_path = gr.Textbox(dir_name, label="Images directory", placeholder="Input images directory", interactive=custom_dir) with gr.Row(visible=False) as warning: warning_box = gr.Textbox("Message", interactive=False) with gr.Row(visible=not custom_dir, elem_id=tabname + "_images_history") as main_panel: with gr.Column(scale=2): - with gr.Row(): - backward = gr.Button('Backward') - date_to = gr.Dropdown(label="Date to") - forward = gr.Button('Forward') + with gr.Row() as batch_panel: + forward = gr.Button('Forward') + date_to = gr.Dropdown(label="Date to") + backward = gr.Button('Backward') newest = gr.Button('Reload', elem_id=tabname + "_images_history_start") with gr.Row(): load_info = gr.Textbox(show_label=False, interactive=False) @@ -335,7 +336,7 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): # hiden items - with gr.Row(): #visible=False): + with gr.Row(visible=False): visible_img_num = gr.Number() date_to_recorder = gr.State([]) last_date_from = gr.Textbox() -- cgit v1.2.1 From 538bc89c269743e56b07ef2b471d1ce0a39b6776 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Wed, 19 Oct 2022 11:27:51 +0800 Subject: Image browser improved --- modules/images_history.py | 135 +++++++++++++++++++++++++--------------------- modules/shared.py | 5 ++ modules/ui.py | 2 +- 3 files changed, 80 insertions(+), 62 deletions(-) (limited to 'modules') diff --git a/modules/images_history.py b/modules/images_history.py index a40cdc0e..78fd0543 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -4,7 +4,9 @@ import time import hashlib import gradio system_bak_path = "webui_log_and_bak" -browser_tabname = "custom" +custom_tab_name = "custom fold" +faverate_tab_name = "favorites" +tabs_list = ["txt2img", "img2img", "extras", faverate_tab_name] def is_valid_date(date): try: time.strptime(date, "%Y%m%d") @@ -122,7 +124,6 @@ def archive_images(dir_name, date_to): else: filenames = traverse_all_files(dir_name, filenames) total_num = len(filenames) - batch_count = len(filenames) + 1 // batch_size + 1 tmparray = [(os.path.getmtime(file), file) for file in filenames ] date_stamp = time.mktime(time.strptime(date_to, "%Y%m%d")) + 86400 filenames = [] @@ -146,10 +147,12 @@ def archive_images(dir_name, date_to): last_date_from = date_to_bak if num == 0 else time.strftime("%Y%m%d", time.localtime(time.mktime(time.strptime(date, "%Y%m%d")) - 1000)) date = date[:4] + "/" + date[4:6] + "/" + date[6:8] date_to_bak = date_to_bak[:4] + "/" + 
date_to_bak[4:6] + "/" + date_to_bak[6:8] - load_info = f"{total_num} images in this directory. Loaded {num} images during {date} - {date_to_bak}, divided into {int((num + 1) // opts.images_history_num_per_page + 1)} pages" + load_info = "
" + load_info += f"{total_num} images in this directory. Loaded {num} images during {date} - {date_to_bak}, divided into {int((num + 1) // opts.images_history_num_per_page + 1)} pages" + load_info += "
" _, image_list, _, _, visible_num = get_recent_images(1, 0, filenames) return ( - gradio.Dropdown.update(choices=date_list, value=date_to), + date_to, load_info, filenames, 1, @@ -158,7 +161,7 @@ def archive_images(dir_name, date_to): "", visible_num, last_date_from, - #gradio.update(visible=batch_count > 1) + gradio.update(visible=total_num > num) ) def delete_image(delete_num, name, filenames, image_index, visible_num): @@ -209,7 +212,7 @@ def get_recent_images(page_index, step, filenames): visible_num = num_of_imgs_per_page if visible_num == 0 else visible_num return page_index, image_list, "", "", visible_num -def newest_click(date_to): +def loac_batch_click(date_to): if date_to is None: return time.strftime("%Y%m%d",time.localtime(time.time())), [] else: @@ -248,7 +251,7 @@ def page_index_change(page_index, filenames): def show_image_info(tabname_box, num, page_index, filenames): file = filenames[int(num) + int((page_index - 1) * int(opts.images_history_num_per_page))] - tm = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(os.path.getmtime(file))) + tm = "
" + time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(os.path.getmtime(file))) + "
" return file, tm, num, file def enable_page_buttons(): @@ -268,9 +271,9 @@ def change_dir(img_dir, date_to): warning = "The format of the directory is incorrect" if warning is None: today = time.strftime("%Y%m%d",time.localtime(time.time())) - return gradio.update(visible=False), gradio.update(visible=True), None, None if date_to != today else today + return gradio.update(visible=False), gradio.update(visible=True), None, None if date_to != today else today, gradio.update(visible=True), gradio.update(visible=True) else: - return gradio.update(visible=True), gradio.update(visible=False), warning, date_to + return gradio.update(visible=True), gradio.update(visible=False), warning, date_to, gradio.update(visible=False), gradio.update(visible=False) def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): custom_dir = False @@ -280,7 +283,7 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): dir_name = opts.outdir_img2img_samples elif tabname == "extras": dir_name = opts.outdir_extras_samples - elif tabname == "saved": + elif tabname == faverate_tab_name: dir_name = opts.outdir_save else: custom_dir = True @@ -295,22 +298,26 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): os.makedirs(dir_name) with gr.Column() as page_panel: - with gr.Row(): - img_path = gr.Textbox(dir_name, label="Images directory", placeholder="Input images directory", interactive=custom_dir) + with gr.Row(): + with gr.Column(scale=1, visible=not custom_dir) as load_batch_box: + load_batch = gr.Button('Load', elem_id=tabname + "_images_history_start", full_width=True) + with gr.Column(scale=4): + with gr.Row(): + img_path = gr.Textbox(dir_name, label="Images directory", placeholder="Input images directory", interactive=custom_dir) + with gr.Row(): + with gr.Column(visible=False, scale=1) as batch_panel: + with gr.Row(): + forward = gr.Button('Prev batch') + backward = gr.Button('Next batch') + with gr.Column(scale=3): + load_info = gr.HTML(visible=not custom_dir) with gr.Row(visible=False) as warning: warning_box = gr.Textbox("Message", interactive=False) with gr.Row(visible=not custom_dir, elem_id=tabname + "_images_history") as main_panel: - with gr.Column(scale=2): - with gr.Row() as batch_panel: - forward = gr.Button('Forward') - date_to = gr.Dropdown(label="Date to") - backward = gr.Button('Backward') - newest = gr.Button('Reload', elem_id=tabname + "_images_history_start") - with gr.Row(): - load_info = gr.Textbox(show_label=False, interactive=False) - with gr.Row(visible=False) as turn_page_buttons: - renew_page = gr.Button('Refresh page', elem_id=tabname + "_images_history_renew_page") + with gr.Column(scale=2): + with gr.Row(visible=True) as turn_page_buttons: + #date_to = gr.Dropdown(label="Date to") first_page = gr.Button('First Page') prev_page = gr.Button('Prev Page') page_index = gr.Number(value=1, label="Page Index") @@ -322,50 +329,54 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): delete_num = gr.Number(value=1, interactive=True, label="number of images to delete consecutively next") delete = gr.Button('Delete', elem_id=tabname + "_images_history_del_button") - with gr.Column(): - with gr.Row(): - if tabname != "saved": - save_btn = gr.Button('Save') - pnginfo_send_to_txt2img = gr.Button('Send to txt2img') - pnginfo_send_to_img2img = gr.Button('Send to img2img') + with gr.Column(): with gr.Row(): with gr.Column(): - img_file_info = gr.Textbox(label="Generate Info", interactive=False) + img_file_info = 
gr.Textbox(label="Generate Info", interactive=False, lines=6) + gr.HTML("
") img_file_name = gr.Textbox(value="", label="File Name", interactive=False) - img_file_time= gr.Textbox(value="", label="Create Time", interactive=False) - + img_file_time= gr.HTML() + with gr.Row(): + if tabname != faverate_tab_name: + save_btn = gr.Button('Collect') + pnginfo_send_to_txt2img = gr.Button('Send to txt2img') + pnginfo_send_to_img2img = gr.Button('Send to img2img') + - # hiden items - with gr.Row(visible=False): - visible_img_num = gr.Number() - date_to_recorder = gr.State([]) - last_date_from = gr.Textbox() - tabname_box = gr.Textbox(tabname) - image_index = gr.Textbox(value=-1) - set_index = gr.Button('set_index', elem_id=tabname + "_images_history_set_index") - filenames = gr.State() - all_images_list = gr.State() - hidden = gr.Image(type="pil") - info1 = gr.Textbox() - info2 = gr.Textbox() - - img_path.submit(change_dir, inputs=[img_path, date_to], outputs=[warning, main_panel, warning_box, date_to]) - #change date - change_date_output = [date_to, load_info, filenames, page_index, history_gallery, img_file_name, img_file_time, visible_img_num, last_date_from] + # hiden items + with gr.Row(visible=False): + renew_page = gr.Button('Refresh page', elem_id=tabname + "_images_history_renew_page") + batch_date_to = gr.Textbox(label="Date to") + visible_img_num = gr.Number() + date_to_recorder = gr.State([]) + last_date_from = gr.Textbox() + tabname_box = gr.Textbox(tabname) + image_index = gr.Textbox(value=-1) + set_index = gr.Button('set_index', elem_id=tabname + "_images_history_set_index") + filenames = gr.State() + all_images_list = gr.State() + hidden = gr.Image(type="pil") + info1 = gr.Textbox() + info2 = gr.Textbox() + + img_path.submit(change_dir, inputs=[img_path, batch_date_to], outputs=[warning, main_panel, warning_box, batch_date_to, load_batch_box, load_info]) + + #change batch + change_date_output = [batch_date_to, load_info, filenames, page_index, history_gallery, img_file_name, img_file_time, visible_img_num, last_date_from, batch_panel] - date_to.change(archive_images, inputs=[img_path, date_to], outputs=change_date_output) - date_to.change(enable_page_buttons, inputs=None, outputs=[turn_page_buttons]) - date_to.change(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") + batch_date_to.change(archive_images, inputs=[img_path, batch_date_to], outputs=change_date_output) + batch_date_to.change(enable_page_buttons, inputs=None, outputs=[turn_page_buttons]) + batch_date_to.change(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") - newest.click(newest_click, inputs=[date_to], outputs=[date_to, date_to_recorder]) - forward.click(forward_click, inputs=[last_date_from, date_to_recorder], outputs=[date_to, date_to_recorder]) - backward.click(backward_click, inputs=[last_date_from, date_to_recorder], outputs=[date_to, date_to_recorder]) + load_batch.click(loac_batch_click, inputs=[batch_date_to], outputs=[batch_date_to, date_to_recorder]) + forward.click(forward_click, inputs=[last_date_from, date_to_recorder], outputs=[batch_date_to, date_to_recorder]) + backward.click(backward_click, inputs=[last_date_from, date_to_recorder], outputs=[batch_date_to, date_to_recorder]) #delete delete.click(delete_image, inputs=[delete_num, img_file_name, filenames, image_index, visible_img_num], outputs=[filenames, delete_num, visible_img_num]) delete.click(fn=None, _js="images_history_delete", inputs=[delete_num, tabname_box, image_index], outputs=None) - if tabname != "saved": + if tabname != faverate_tab_name: 
save_btn.click(save_image, inputs=[img_file_name], outputs=None) #turn page @@ -394,18 +405,20 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): -def create_history_tabs(gr, sys_opts, run_pnginfo, switch_dict): +def create_history_tabs(gr, sys_opts, cmp_ops, run_pnginfo, switch_dict): global opts; opts = sys_opts loads_files_num = int(opts.images_history_num_per_page) num_of_imgs_per_page = int(opts.images_history_num_per_page * opts.images_history_pages_num) + if cmp_ops.browse_all_images: + tabs_list.append(custom_tab_name) with gr.Blocks(analytics_enabled=False) as images_history: with gr.Tabs() as tabs: - for tab in [browser_tabname, "txt2img", "img2img", "extras", "saved"]: + for tab in tabs_list: with gr.Tab(tab): with gr.Blocks(analytics_enabled=False) : - show_images_history(gr, opts, tab, run_pnginfo, switch_dict) - #gradio.Checkbox(opts.images_history_reconstruct_directory, elem_id="images_history_reconstruct_directory", visible=False) - gradio.Checkbox(opts.images_history_reconstruct_directory, elem_id="images_history_finish_render", visible=False) - + show_images_history(gr, opts, tab, run_pnginfo, switch_dict) + gradio.Checkbox(opts.images_history_preload, elem_id="images_history_preload", visible=False) + gradio.Textbox(",".join(tabs_list), elem_id="images_history_tabnames_list", visible=False) + return images_history diff --git a/modules/shared.py b/modules/shared.py index 1811018d..4d735414 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -74,6 +74,10 @@ parser.add_argument("--disable-console-progressbars", action='store_true', help= parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False) parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None) parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False) +parser.add_argument("--browse-all-images", action='store_true', help="Allow browsing all images by Image Browser", default=False) + + +cmd_opts = parser.parse_args() cmd_opts = parser.parse_args() @@ -311,6 +315,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" options_templates.update(options_section(('images-history', "Images Browser"), { #"images_history_reconstruct_directory": OptionInfo(False, "Reconstruct output directory structure.This can greatly improve the speed of loading , but will change the original output directory structure"), + "images_history_preload": OptionInfo(False, "Preload images at startup"), "images_history_num_per_page": OptionInfo(36, "Number of pictures displayed on each page"), "images_history_pages_num": OptionInfo(6, "Minimum number of pages per load "), "images_history_grid_num": OptionInfo(6, "Number of grids in each row"), diff --git a/modules/ui.py b/modules/ui.py index 85abac4d..88f46659 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1150,7 +1150,7 @@ def create_ui(wrap_gradio_gpu_call): "i2i":img2img_paste_fields } - images_history = img_his.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) + images_history = img_his.create_history_tabs(gr, opts, cmd_opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) with gr.Blocks() as modelmerger_interface: with gr.Row().style(equal_height=False): -- cgit v1.2.1 From abeec4b63029c2c4151a78fc395d312113881845 Mon Sep 17 
00:00:00 2001 From: captin411 Date: Wed, 19 Oct 2022 03:18:26 -0700 Subject: Add auto focal point cropping to Preprocess images This algorithm plots a bunch of points of interest on the source image and averages their locations to find a center. Most points come from OpenCV. One point comes from an entropy model. OpenCV points account for 50% of the weight and the entropy based point is the other 50%. The center of all weighted points is calculated and a bounding box is drawn as close to centered over that point as possible. --- modules/textual_inversion/preprocess.py | 151 ++++++++++++++++++++++++++++++-- 1 file changed, 146 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 886cf0c3..168bfb09 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -1,5 +1,7 @@ import os -from PIL import Image, ImageOps +import cv2 +import numpy as np +from PIL import Image, ImageOps, ImageDraw import platform import sys import tqdm @@ -11,7 +13,7 @@ if cmd_opts.deepdanbooru: import modules.deepbooru as deepbooru -def preprocess(process_src, process_dst, process_width, process_height, process_flip, process_split, process_caption, process_caption_deepbooru=False): +def preprocess(process_src, process_dst, process_width, process_height, process_flip, process_split, process_caption, process_caption_deepbooru=False, process_entropy_focus=False): try: if process_caption: shared.interrogator.load() @@ -21,7 +23,7 @@ def preprocess(process_src, process_dst, process_width, process_height, process_ db_opts[deepbooru.OPT_INCLUDE_RANKS] = False deepbooru.create_deepbooru_process(opts.interrogate_deepbooru_score_threshold, db_opts) - preprocess_work(process_src, process_dst, process_width, process_height, process_flip, process_split, process_caption, process_caption_deepbooru) + preprocess_work(process_src, process_dst, process_width, process_height, process_flip, process_split, process_caption, process_caption_deepbooru, process_entropy_focus) finally: @@ -33,7 +35,7 @@ def preprocess(process_src, process_dst, process_width, process_height, process_ -def preprocess_work(process_src, process_dst, process_width, process_height, process_flip, process_split, process_caption, process_caption_deepbooru=False): +def preprocess_work(process_src, process_dst, process_width, process_height, process_flip, process_split, process_caption, process_caption_deepbooru=False, process_entropy_focus=False): width = process_width height = process_height src = os.path.abspath(process_src) @@ -93,6 +95,8 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pro is_tall = ratio > 1.35 is_wide = ratio < 1 / 1.35 + processing_option_ran = False + if process_split and is_tall: img = img.resize((width, height * img.height // img.width)) @@ -101,6 +105,8 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pro bot = img.crop((0, img.height - height, width, img.height)) save_pic(bot, index) + + processing_option_ran = True elif process_split and is_wide: img = img.resize((width * img.width // img.height, height)) @@ -109,8 +115,143 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pro right = img.crop((img.width - width, 0, img.width, height)) save_pic(right, index) - else: + + processing_option_ran = True + + if process_entropy_focus and (is_tall or is_wide): + if is_tall: + img = img.resize((width, height * 
img.height // img.width)) + else: + img = img.resize((width * img.width // img.height, height)) + + x_focal_center, y_focal_center = image_central_focal_point(img, width, height) + + # take the focal point and turn it into crop coordinates that try to center over the focal + # point but then get adjusted back into the frame + y_half = int(height / 2) + x_half = int(width / 2) + + x1 = x_focal_center - x_half + if x1 < 0: + x1 = 0 + elif x1 + width > img.width: + x1 = img.width - width + + y1 = y_focal_center - y_half + if y1 < 0: + y1 = 0 + elif y1 + height > img.height: + y1 = img.height - height + + x2 = x1 + width + y2 = y1 + height + + crop = [x1, y1, x2, y2] + + focal = img.crop(tuple(crop)) + save_pic(focal, index) + + processing_option_ran = True + + if not processing_option_ran: img = images.resize_image(1, img, width, height) save_pic(img, index) shared.state.nextjob() + + +def image_central_focal_point(im, target_width, target_height): + focal_points = [] + + focal_points.extend( + image_focal_points(im) + ) + + fp_entropy = image_entropy_point(im, target_width, target_height) + fp_entropy['weight'] = len(focal_points) + 1 # about half of the weight to entropy + + focal_points.append(fp_entropy) + + weight = 0.0 + x = 0.0 + y = 0.0 + for focal_point in focal_points: + weight += focal_point['weight'] + x += focal_point['x'] * focal_point['weight'] + y += focal_point['y'] * focal_point['weight'] + avg_x = round(x // weight) + avg_y = round(y // weight) + + return avg_x, avg_y + + +def image_focal_points(im): + grayscale = im.convert("L") + + # naive attempt at preventing focal points from collecting at watermarks near the bottom + gd = ImageDraw.Draw(grayscale) + gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999") + + np_im = np.array(grayscale) + + points = cv2.goodFeaturesToTrack( + np_im, + maxCorners=50, + qualityLevel=0.04, + minDistance=min(grayscale.width, grayscale.height)*0.05, + useHarrisDetector=False, + ) + + if points is None: + return [] + + focal_points = [] + for point in points: + x, y = point.ravel() + focal_points.append({ + 'x': x, + 'y': y, + 'weight': 1.0 + }) + + return focal_points + + +def image_entropy_point(im, crop_width, crop_height): + img = im.copy() + # just make it easier to slide the test crop with images oriented the same way + if (img.size[0] < img.size[1]): + portrait = True + img = img.rotate(90, expand=1) + + e_max = 0 + crop_current = [0, 0, crop_width, crop_height] + crop_best = crop_current + while crop_current[2] < img.size[0]: + crop = img.crop(tuple(crop_current)) + e = image_entropy(crop) + + if (e_max < e): + e_max = e + crop_best = list(crop_current) + + crop_current[0] += 4 + crop_current[2] += 4 + + x_mid = int((crop_best[2] - crop_best[0])/2) + y_mid = int((crop_best[3] - crop_best[1])/2) + + return { + 'x': x_mid, + 'y': y_mid, + 'weight': 1.0 + } + + +def image_entropy(im): + # greyscale image entropy + band = np.asarray(im.convert("L")) + hist, _ = np.histogram(band, bins=range(0, 256)) + hist = hist[hist > 0] + return -np.log2(hist / hist.sum()).sum() + -- cgit v1.2.1 From 087609ee181a91a523647435ffffa6288a317e2f Mon Sep 17 00:00:00 2001 From: captin411 Date: Wed, 19 Oct 2022 03:19:35 -0700 Subject: UI changes for focal point image cropping --- modules/ui.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index 1ff7eb4f..b6be713b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1234,6 +1234,7 @@ def create_ui(wrap_gradio_gpu_call): with 
gr.Row(): process_flip = gr.Checkbox(label='Create flipped copies') process_split = gr.Checkbox(label='Split oversized images into two') + process_entropy_focus = gr.Checkbox(label='Create auto focal point crop') process_caption = gr.Checkbox(label='Use BLIP for caption') process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True if cmd_opts.deepdanbooru else False) @@ -1318,7 +1319,8 @@ def create_ui(wrap_gradio_gpu_call): process_flip, process_split, process_caption, - process_caption_deepbooru + process_caption_deepbooru, + process_entropy_focus ], outputs=[ ti_output, -- cgit v1.2.1 From 41e3877be2c667316515c86037413763eb0ba4da Mon Sep 17 00:00:00 2001 From: captin411 Date: Wed, 19 Oct 2022 13:44:59 -0700 Subject: fix entropy point calculation --- modules/textual_inversion/preprocess.py | 34 ++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) (limited to 'modules') diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 168bfb09..7c1a594e 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -196,9 +196,9 @@ def image_focal_points(im): points = cv2.goodFeaturesToTrack( np_im, - maxCorners=50, + maxCorners=100, qualityLevel=0.04, - minDistance=min(grayscale.width, grayscale.height)*0.05, + minDistance=min(grayscale.width, grayscale.height)*0.07, useHarrisDetector=False, ) @@ -218,28 +218,32 @@ def image_focal_points(im): def image_entropy_point(im, crop_width, crop_height): - img = im.copy() - # just make it easier to slide the test crop with images oriented the same way - if (img.size[0] < img.size[1]): - portrait = True - img = img.rotate(90, expand=1) + landscape = im.height < im.width + portrait = im.height > im.width + if landscape: + move_idx = [0, 2] + move_max = im.size[0] + elif portrait: + move_idx = [1, 3] + move_max = im.size[1] e_max = 0 crop_current = [0, 0, crop_width, crop_height] crop_best = crop_current - while crop_current[2] < img.size[0]: - crop = img.crop(tuple(crop_current)) + while crop_current[move_idx[1]] < move_max: + crop = im.crop(tuple(crop_current)) e = image_entropy(crop) - if (e_max < e): + if (e > e_max): e_max = e crop_best = list(crop_current) - crop_current[0] += 4 - crop_current[2] += 4 + crop_current[move_idx[0]] += 4 + crop_current[move_idx[1]] += 4 + + x_mid = int(crop_best[0] + crop_width/2) + y_mid = int(crop_best[1] + crop_height/2) - x_mid = int((crop_best[2] - crop_best[0])/2) - y_mid = int((crop_best[3] - crop_best[1])/2) return { 'x': x_mid, @@ -250,7 +254,7 @@ def image_entropy_point(im, crop_width, crop_height): def image_entropy(im): # greyscale image entropy - band = np.asarray(im.convert("L")) + band = np.asarray(im.convert("1")) hist, _ = np.histogram(band, bins=range(0, 256)) hist = hist[hist > 0] return -np.log2(hist / hist.sum()).sum() -- cgit v1.2.1 From 59ed74438318af893d2cba552b0e28dbc2a9266c Mon Sep 17 00:00:00 2001 From: captin411 Date: Wed, 19 Oct 2022 17:19:02 -0700 Subject: face detection algo, configurability, reusability Try to move the crop in the direction of a face if it is present More internal configuration options for choosing weights of each of the algorithm's findings Move logic into its module --- modules/textual_inversion/autocrop.py | 216 ++++++++++++++++++++++++++++++++ modules/textual_inversion/preprocess.py | 150 +++------------------- 2 files changed, 230 insertions(+), 136 deletions(-) create mode 100644 modules/textual_inversion/autocrop.py (limited to 
'modules') diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py new file mode 100644 index 00000000..f858a958 --- /dev/null +++ b/modules/textual_inversion/autocrop.py @@ -0,0 +1,216 @@ +import cv2 +from collections import defaultdict +from math import log, sqrt +import numpy as np +from PIL import Image, ImageDraw + +GREEN = "#0F0" +BLUE = "#00F" +RED = "#F00" + +def crop_image(im, settings): + """ Intelligently crop an image to the subject matter """ + if im.height > im.width: + im = im.resize((settings.crop_width, settings.crop_height * im.height // im.width)) + else: + im = im.resize((settings.crop_width * im.width // im.height, settings.crop_height)) + + focus = focal_point(im, settings) + + # take the focal point and turn it into crop coordinates that try to center over the focal + # point but then get adjusted back into the frame + y_half = int(settings.crop_height / 2) + x_half = int(settings.crop_width / 2) + + x1 = focus.x - x_half + if x1 < 0: + x1 = 0 + elif x1 + settings.crop_width > im.width: + x1 = im.width - settings.crop_width + + y1 = focus.y - y_half + if y1 < 0: + y1 = 0 + elif y1 + settings.crop_height > im.height: + y1 = im.height - settings.crop_height + + x2 = x1 + settings.crop_width + y2 = y1 + settings.crop_height + + crop = [x1, y1, x2, y2] + + if settings.annotate_image: + d = ImageDraw.Draw(im) + rect = list(crop) + rect[2] -= 1 + rect[3] -= 1 + d.rectangle(rect, outline=GREEN) + if settings.destop_view_image: + im.show() + + return im.crop(tuple(crop)) + +def focal_point(im, settings): + corner_points = image_corner_points(im, settings) + entropy_points = image_entropy_points(im, settings) + face_points = image_face_points(im, settings) + + total_points = len(corner_points) + len(entropy_points) + len(face_points) + + corner_weight = settings.corner_points_weight + entropy_weight = settings.entropy_points_weight + face_weight = settings.face_points_weight + + weight_pref_total = corner_weight + entropy_weight + face_weight + + # weight things + pois = [] + if weight_pref_total == 0 or total_points == 0: + return pois + + pois.extend( + [ PointOfInterest( p.x, p.y, weight=p.weight * ( (corner_weight/weight_pref_total) / (len(corner_points)/total_points) )) for p in corner_points ] + ) + pois.extend( + [ PointOfInterest( p.x, p.y, weight=p.weight * ( (entropy_weight/weight_pref_total) / (len(entropy_points)/total_points) )) for p in entropy_points ] + ) + pois.extend( + [ PointOfInterest( p.x, p.y, weight=p.weight * ( (face_weight/weight_pref_total) / (len(face_points)/total_points) )) for p in face_points ] + ) + + if settings.annotate_image: + d = ImageDraw.Draw(im) + + average_point = poi_average(pois, settings, im=im) + + if settings.annotate_image: + d.ellipse([average_point.x - 25, average_point.y - 25, average_point.x + 25, average_point.y + 25], outline=GREEN) + + return average_point + + +def image_face_points(im, settings): + np_im = np.array(im) + gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY) + classifier = cv2.CascadeClassifier(f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml') + + minsize = int(min(im.width, im.height) * 0.15) # at least N percent of the smallest side + faces = classifier.detectMultiScale(gray, scaleFactor=1.05, + minNeighbors=5, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE) + + if len(faces) == 0: + return [] + + rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces] + if settings.annotate_image: + for f in rects: + d = ImageDraw.Draw(im) + d.rectangle(f, 
outline=RED) + + return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2) for r in rects] + + +def image_corner_points(im, settings): + grayscale = im.convert("L") + + # naive attempt at preventing focal points from collecting at watermarks near the bottom + gd = ImageDraw.Draw(grayscale) + gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999") + + np_im = np.array(grayscale) + + points = cv2.goodFeaturesToTrack( + np_im, + maxCorners=100, + qualityLevel=0.04, + minDistance=min(grayscale.width, grayscale.height)*0.07, + useHarrisDetector=False, + ) + + if points is None: + return [] + + focal_points = [] + for point in points: + x, y = point.ravel() + focal_points.append(PointOfInterest(x, y)) + + return focal_points + + +def image_entropy_points(im, settings): + landscape = im.height < im.width + portrait = im.height > im.width + if landscape: + move_idx = [0, 2] + move_max = im.size[0] + elif portrait: + move_idx = [1, 3] + move_max = im.size[1] + else: + return [] + + e_max = 0 + crop_current = [0, 0, settings.crop_width, settings.crop_height] + crop_best = crop_current + while crop_current[move_idx[1]] < move_max: + crop = im.crop(tuple(crop_current)) + e = image_entropy(crop) + + if (e > e_max): + e_max = e + crop_best = list(crop_current) + + crop_current[move_idx[0]] += 4 + crop_current[move_idx[1]] += 4 + + x_mid = int(crop_best[0] + settings.crop_width/2) + y_mid = int(crop_best[1] + settings.crop_height/2) + + return [PointOfInterest(x_mid, y_mid)] + + +def image_entropy(im): + # greyscale image entropy + band = np.asarray(im.convert("1")) + hist, _ = np.histogram(band, bins=range(0, 256)) + hist = hist[hist > 0] + return -np.log2(hist / hist.sum()).sum() + + +def poi_average(pois, settings, im=None): + weight = 0.0 + x = 0.0 + y = 0.0 + for pois in pois: + if settings.annotate_image and im is not None: + w = 4 * 0.5 * sqrt(pois.weight) + d = ImageDraw.Draw(im) + d.ellipse([ + pois.x - w, pois.y - w, + pois.x + w, pois.y + w ], fill=BLUE) + weight += pois.weight + x += pois.x * pois.weight + y += pois.y * pois.weight + avg_x = round(x / weight) + avg_y = round(y / weight) + + return PointOfInterest(avg_x, avg_y) + + +class PointOfInterest: + def __init__(self, x, y, weight=1.0): + self.x = x + self.y = y + self.weight = weight + + +class Settings: + def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False): + self.crop_width = crop_width + self.crop_height = crop_height + self.corner_points_weight = corner_points_weight + self.entropy_points_weight = entropy_points_weight + self.face_points_weight = entropy_points_weight + self.annotate_image = annotate_image + self.destop_view_image = False \ No newline at end of file diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 7c1a594e..0c79f012 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -1,7 +1,5 @@ import os -import cv2 -import numpy as np -from PIL import Image, ImageOps, ImageDraw +from PIL import Image, ImageOps import platform import sys import tqdm @@ -9,6 +7,7 @@ import time from modules import shared, images from modules.shared import opts, cmd_opts +from modules.textual_inversion import autocrop if cmd_opts.deepdanbooru: import modules.deepbooru as deepbooru @@ -80,6 +79,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pro if process_flip: 
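The entropy-driven window search in image_entropy_points above is easier to follow in isolation. Below is a minimal, self-contained sketch of the same idea, assuming only numpy and a PIL image as input; best_entropy_crop is a hypothetical helper name, and the bounds check is tightened slightly so the window never leaves the image:

    import numpy as np

    def entropy_score(im):
        # same scoring as image_entropy above: histogram the 1-bit version of
        # the crop, then negate the summed log2 of the non-empty bin probabilities
        band = np.asarray(im.convert("1"), dtype=np.uint8)
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()

    def best_entropy_crop(im, crop_w, crop_h, step=4):
        # slide a crop_w x crop_h window along the image's long axis and keep
        # the window whose contents score the highest entropy
        horizontal = im.width > im.height
        best, e_max = (0, 0, crop_w, crop_h), float("-inf")
        x = y = 0
        while (x + crop_w <= im.width) if horizontal else (y + crop_h <= im.height):
            box = (x, y, x + crop_w, y + crop_h)
            e = entropy_score(im.crop(box))
            if e > e_max:
                e_max, best = e, box
            if horizontal:
                x += step
            else:
                y += step
        return best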
save_pic_with_caption(ImageOps.mirror(image), index) + for index, imagefile in enumerate(tqdm.tqdm(files)): subindex = [0] filename = os.path.join(src, imagefile) @@ -118,37 +118,16 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pro processing_option_ran = True - if process_entropy_focus and (is_tall or is_wide): - if is_tall: - img = img.resize((width, height * img.height // img.width)) - else: - img = img.resize((width * img.width // img.height, height)) - - x_focal_center, y_focal_center = image_central_focal_point(img, width, height) - - # take the focal point and turn it into crop coordinates that try to center over the focal - # point but then get adjusted back into the frame - y_half = int(height / 2) - x_half = int(width / 2) - - x1 = x_focal_center - x_half - if x1 < 0: - x1 = 0 - elif x1 + width > img.width: - x1 = img.width - width - - y1 = y_focal_center - y_half - if y1 < 0: - y1 = 0 - elif y1 + height > img.height: - y1 = img.height - height - - x2 = x1 + width - y2 = y1 + height - - crop = [x1, y1, x2, y2] - - focal = img.crop(tuple(crop)) + if process_entropy_focus and img.height != img.width: + autocrop_settings = autocrop.Settings( + crop_width = width, + crop_height = height, + face_points_weight = 0.9, + entropy_points_weight = 0.7, + corner_points_weight = 0.5, + annotate_image = False + ) + focal = autocrop.crop_image(img, autocrop_settings) save_pic(focal, index) processing_option_ran = True @@ -157,105 +136,4 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pro img = images.resize_image(1, img, width, height) save_pic(img, index) - shared.state.nextjob() - - -def image_central_focal_point(im, target_width, target_height): - focal_points = [] - - focal_points.extend( - image_focal_points(im) - ) - - fp_entropy = image_entropy_point(im, target_width, target_height) - fp_entropy['weight'] = len(focal_points) + 1 # about half of the weight to entropy - - focal_points.append(fp_entropy) - - weight = 0.0 - x = 0.0 - y = 0.0 - for focal_point in focal_points: - weight += focal_point['weight'] - x += focal_point['x'] * focal_point['weight'] - y += focal_point['y'] * focal_point['weight'] - avg_x = round(x // weight) - avg_y = round(y // weight) - - return avg_x, avg_y - - -def image_focal_points(im): - grayscale = im.convert("L") - - # naive attempt at preventing focal points from collecting at watermarks near the bottom - gd = ImageDraw.Draw(grayscale) - gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999") - - np_im = np.array(grayscale) - - points = cv2.goodFeaturesToTrack( - np_im, - maxCorners=100, - qualityLevel=0.04, - minDistance=min(grayscale.width, grayscale.height)*0.07, - useHarrisDetector=False, - ) - - if points is None: - return [] - - focal_points = [] - for point in points: - x, y = point.ravel() - focal_points.append({ - 'x': x, - 'y': y, - 'weight': 1.0 - }) - - return focal_points - - -def image_entropy_point(im, crop_width, crop_height): - landscape = im.height < im.width - portrait = im.height > im.width - if landscape: - move_idx = [0, 2] - move_max = im.size[0] - elif portrait: - move_idx = [1, 3] - move_max = im.size[1] - - e_max = 0 - crop_current = [0, 0, crop_width, crop_height] - crop_best = crop_current - while crop_current[move_idx[1]] < move_max: - crop = im.crop(tuple(crop_current)) - e = image_entropy(crop) - - if (e > e_max): - e_max = e - crop_best = list(crop_current) - - crop_current[move_idx[0]] += 4 - crop_current[move_idx[1]] += 4 - - x_mid = int(crop_best[0] 
+ crop_width/2) - y_mid = int(crop_best[1] + crop_height/2) - - - return { - 'x': x_mid, - 'y': y_mid, - 'weight': 1.0 - } - - -def image_entropy(im): - # greyscale image entropy - band = np.asarray(im.convert("1")) - hist, _ = np.histogram(band, bins=range(0, 256)) - hist = hist[hist > 0] - return -np.log2(hist / hist.sum()).sum() - + shared.state.nextjob() \ No newline at end of file -- cgit v1.2.1 From 0ddaf8d2028a7251e8c4ad93551a43b5d4700841 Mon Sep 17 00:00:00 2001 From: captin411 Date: Thu, 20 Oct 2022 00:34:55 -0700 Subject: improve face detection a lot --- modules/textual_inversion/autocrop.py | 99 ++++++++++++++++++++++------------- 1 file changed, 62 insertions(+), 37 deletions(-) (limited to 'modules') diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index f858a958..5a551c25 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -8,12 +8,18 @@ GREEN = "#0F0" BLUE = "#00F" RED = "#F00" + def crop_image(im, settings): """ Intelligently crop an image to the subject matter """ if im.height > im.width: im = im.resize((settings.crop_width, settings.crop_height * im.height // im.width)) - else: + elif im.width > im.height: im = im.resize((settings.crop_width * im.width // im.height, settings.crop_height)) + else: + im = im.resize((settings.crop_width, settings.crop_height)) + + if im.height == im.width: + return im focus = focal_point(im, settings) @@ -78,13 +84,18 @@ def focal_point(im, settings): [ PointOfInterest( p.x, p.y, weight=p.weight * ( (face_weight/weight_pref_total) / (len(face_points)/total_points) )) for p in face_points ] ) - if settings.annotate_image: - d = ImageDraw.Draw(im) - - average_point = poi_average(pois, settings, im=im) + average_point = poi_average(pois, settings) if settings.annotate_image: - d.ellipse([average_point.x - 25, average_point.y - 25, average_point.x + 25, average_point.y + 25], outline=GREEN) + d = ImageDraw.Draw(im) + for f in face_points: + d.rectangle(f.bounding(f.size), outline=RED) + for f in entropy_points: + d.rectangle(f.bounding(30), outline=BLUE) + for poi in pois: + w = max(4, 4 * 0.5 * sqrt(poi.weight)) + d.ellipse(poi.bounding(w), fill=BLUE) + d.ellipse(average_point.bounding(25), outline=GREEN) return average_point @@ -92,22 +103,32 @@ def focal_point(im, settings): def image_face_points(im, settings): np_im = np.array(im) gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY) - classifier = cv2.CascadeClassifier(f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml') - - minsize = int(min(im.width, im.height) * 0.15) # at least N percent of the smallest side - faces = classifier.detectMultiScale(gray, scaleFactor=1.05, - minNeighbors=5, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE) - if len(faces) == 0: - return [] - - rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces] - if settings.annotate_image: - for f in rects: - d = ImageDraw.Draw(im) - d.rectangle(f, outline=RED) - - return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2) for r in rects] + tries = [ + [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ], + [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ], + [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ], + [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ], + [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ], + [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ], + [ 
f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ], + [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ] + ] + + for t in tries: + # print(t[0]) + classifier = cv2.CascadeClassifier(t[0]) + minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side + try: + faces = classifier.detectMultiScale(gray, scaleFactor=1.1, + minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE) + except: + continue + + if len(faces) > 0: + rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces] + return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2])) for r in rects] + return [] def image_corner_points(im, settings): @@ -132,8 +153,8 @@ def image_corner_points(im, settings): focal_points = [] for point in points: - x, y = point.ravel() - focal_points.append(PointOfInterest(x, y)) + x, y = point.ravel() + focal_points.append(PointOfInterest(x, y, size=4)) return focal_points @@ -167,31 +188,26 @@ def image_entropy_points(im, settings): x_mid = int(crop_best[0] + settings.crop_width/2) y_mid = int(crop_best[1] + settings.crop_height/2) - return [PointOfInterest(x_mid, y_mid)] + return [PointOfInterest(x_mid, y_mid, size=25)] def image_entropy(im): # greyscale image entropy - band = np.asarray(im.convert("1")) + # band = np.asarray(im.convert("L")) + band = np.asarray(im.convert("1"), dtype=np.uint8) hist, _ = np.histogram(band, bins=range(0, 256)) hist = hist[hist > 0] return -np.log2(hist / hist.sum()).sum() -def poi_average(pois, settings, im=None): +def poi_average(pois, settings): weight = 0.0 x = 0.0 y = 0.0 - for pois in pois: - if settings.annotate_image and im is not None: - w = 4 * 0.5 * sqrt(pois.weight) - d = ImageDraw.Draw(im) - d.ellipse([ - pois.x - w, pois.y - w, - pois.x + w, pois.y + w ], fill=BLUE) - weight += pois.weight - x += pois.x * pois.weight - y += pois.y * pois.weight + for poi in pois: + weight += poi.weight + x += poi.x * poi.weight + y += poi.y * poi.weight avg_x = round(x / weight) avg_y = round(y / weight) @@ -199,10 +215,19 @@ def poi_average(pois, settings, im=None): class PointOfInterest: - def __init__(self, x, y, weight=1.0): + def __init__(self, x, y, weight=1.0, size=10): self.x = x self.y = y self.weight = weight + self.size = size + + def bounding(self, size): + return [ + self.x - size//2, + self.y - size//2, + self.x + size//2, + self.y + size//2 + ] class Settings: -- cgit v1.2.1 From d8acd34f66ab35a91f10d66330bcc95a83bfcac6 Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Thu, 20 Oct 2022 23:43:03 +0900 Subject: generalized some functions and option for ignoring first layer --- modules/hypernetworks/hypernetwork.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 7d617680..3a44b377 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -21,21 +21,27 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler class HypernetworkModule(torch.nn.Module): multiplier = 1.0 - + activation_dict = {"relu": torch.nn.ReLU, "leakyrelu": torch.nn.LeakyReLU, "elu": torch.nn.ELU, + "swish": torch.nn.Hardswish} + def __init__(self, dim, state_dict=None, layer_structure=None, add_layer_norm=False, activation_func=None): super().__init__() assert layer_structure is not None, "layer_structure must not be None" assert 
layer_structure[0] == 1, "Multiplier Sequence should start with size 1!" assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!" - + linears = [] for i in range(len(layer_structure) - 1): linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) - if activation_func == "relu": - linears.append(torch.nn.ReLU()) - if activation_func == "leakyrelu": - linears.append(torch.nn.LeakyReLU()) + # if skip_first_layer because first parameters potentially contain negative values + if i < 1: continue + if activation_func in HypernetworkModule.activation_dict: + linears.append(HypernetworkModule.activation_dict[activation_func]()) + else: + print("Invalid key {} encountered as activation function!".format(activation_func)) + # if use_dropout: + linears.append(torch.nn.Dropout(p=0.3)) if add_layer_norm: linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) @@ -46,7 +52,7 @@ class HypernetworkModule(torch.nn.Module): self.load_state_dict(state_dict) else: for layer in self.linear: - if not "ReLU" in layer.__str__(): + if isinstance(layer, torch.nn.Linear): layer.weight.data.normal_(mean=0.0, std=0.01) layer.bias.data.zero_() @@ -298,7 +304,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log return hypernetwork, filename scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) - optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) + # if optimizer == "Adam": or else Adam / AdamW / etc... + optimizer = torch.optim.Adam(weights, lr=scheduler.learn_rate) pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) for i, entries in pbar: -- cgit v1.2.1 From a71e0212363979c7cbbb797c9fbd5f8cd03b29d3 Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Thu, 20 Oct 2022 23:48:52 +0900 Subject: only linear --- modules/hypernetworks/hypernetwork.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 3a44b377..905cbeef 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -35,13 +35,13 @@ class HypernetworkModule(torch.nn.Module): for i in range(len(layer_structure) - 1): linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) # if skip_first_layer because first parameters potentially contain negative values - if i < 1: continue + # if i < 1: continue if activation_func in HypernetworkModule.activation_dict: linears.append(HypernetworkModule.activation_dict[activation_func]()) else: print("Invalid key {} encountered as activation function!".format(activation_func)) # if use_dropout: - linears.append(torch.nn.Dropout(p=0.3)) + # linears.append(torch.nn.Dropout(p=0.3)) if add_layer_norm: linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) @@ -80,7 +80,7 @@ class HypernetworkModule(torch.nn.Module): def trainables(self): layer_structure = [] for layer in self.linear: - if not "ReLU" in layer.__str__(): + if isinstance(layer, torch.nn.Linear): layer_structure += [layer.weight, layer.bias] return layer_structure @@ -304,8 +304,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log return hypernetwork, filename scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) - # if optimizer == "Adam": or else Adam / AdamW / etc... 
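The activation_dict introduced above swaps the earlier chain of per-name if statements for a single lookup from option string to torch.nn module. Reduced to its core, the pattern is sketched below; build_mlp is a hypothetical name for illustration, not this module's API, and the relative order of the activation and LayerNorm layers is exactly what these commits keep shuffling:

    import torch

    ACTIVATIONS = {
        "relu": torch.nn.ReLU,
        "leakyrelu": torch.nn.LeakyReLU,
        "elu": torch.nn.ELU,
        "swish": torch.nn.Hardswish,  # the commit maps "swish" to Hardswish
    }

    def build_mlp(dim, layer_structure, activation_func=None, add_layer_norm=False):
        # layer_structure holds width multipliers, e.g. [1, 2, 1]
        layers = []
        for a, b in zip(layer_structure[:-1], layer_structure[1:]):
            layers.append(torch.nn.Linear(int(dim * a), int(dim * b)))
            if activation_func in ACTIVATIONS:
                layers.append(ACTIVATIONS[activation_func]())
            if add_layer_norm:
                layers.append(torch.nn.LayerNorm(int(dim * b)))
        return torch.nn.Sequential(*layers)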
- optimizer = torch.optim.Adam(weights, lr=scheduler.learn_rate) + # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc... + optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) for i, entries in pbar: -- cgit v1.2.1 From d07cb46f34b3d9fe7a78b102f899ebef352ea56b Mon Sep 17 00:00:00 2001 From: yfszzx Date: Thu, 20 Oct 2022 23:58:52 +0800 Subject: inspiration pull request --- modules/inspiration.py | 122 +++++++++++++++++++++++++++++++++++++++++++++++++ modules/shared.py | 1 + modules/ui.py | 13 ++++-- 3 files changed, 131 insertions(+), 5 deletions(-) create mode 100644 modules/inspiration.py (limited to 'modules') diff --git a/modules/inspiration.py b/modules/inspiration.py new file mode 100644 index 00000000..456bfcb5 --- /dev/null +++ b/modules/inspiration.py @@ -0,0 +1,122 @@ +import os +import random +import gradio +inspiration_path = "inspiration" +inspiration_system_path = os.path.join(inspiration_path, "system") +def read_name_list(file): + if not os.path.exists(file): + return [] + f = open(file, "r") + ret = [] + line = f.readline() + while len(line) > 0: + line = line.rstrip("\n") + ret.append(line) + print(ret) + return ret + +def save_name_list(file, name): + print(file) + f = open(file, "a") + f.write(name + "\n") + +def get_inspiration_images(source, types): + path = os.path.join(inspiration_path , types) + if source == "Favorites": + names = read_name_list(os.path.join(inspiration_system_path, types + "_faverites.txt")) + names = random.sample(names, 25) + elif source == "Abandoned": + names = read_name_list(os.path.join(inspiration_system_path, types + "_abondened.txt")) + names = random.sample(names, 25) + elif source == "Exclude abandoned": + abondened = read_name_list(os.path.join(inspiration_system_path, types + "_abondened.txt")) + all_names = os.listdir(path) + names = [] + while len(names) < 25: + name = random.choice(all_names) + if name not in abondened: + names.append(name) + else: + names = random.sample(os.listdir(path), 25) + names = random.sample(names, 25) + image_list = [] + for a in names: + image_path = os.path.join(path, a) + images = os.listdir(image_path) + image_list.append(os.path.join(image_path, random.choice(images))) + return image_list, names + +def select_click(index, types, name_list): + name = name_list[int(index)] + path = os.path.join(inspiration_path, types, name) + images = os.listdir(path) + return name, [os.path.join(path, x) for x in images] + +def give_up_click(name, types): + file = os.path.join(inspiration_system_path, types + "_abandoned.txt") + name_list = read_name_list(file) + if name not in name_list: + save_name_list(file, name) + +def collect_click(name, types): + file = os.path.join(inspiration_system_path, types + "_faverites.txt") + print(file) + name_list = read_name_list(file) + print(name_list) + if name not in name_list: + save_name_list(file, name) + +def moveout_click(name, types): + file = os.path.join(inspiration_system_path, types + "_faverites.txt") + name_list = read_name_list(file) + if name not in name_list: + save_name_list(file, name) + +def source_change(source): + if source == "Abandoned" or source == "Favorites": + return gradio.Button.update(visible=True, value=f"Move out {source}") + else: + return gradio.Button.update(visible=False) + +def ui(gr, opts): + with gr.Blocks(analytics_enabled=False) as inspiration: + flag = os.path.exists(inspiration_path) + if flag: + types = os.listdir(inspiration_path) + types = [x for x in types 
if x != "system"] + flag = len(types) > 0 + if not flag: + os.mkdir(inspiration_path) + gr.HTML(""" +
" + """) + return inspiration + if not os.path.exists(inspiration_system_path): + os.mkdir(inspiration_system_path) + gallery, names = get_inspiration_images("Exclude abandoned", types[0]) + with gr.Row(): + with gr.Column(scale=2): + inspiration_gallery = gr.Gallery(gallery, show_label=False, elem_id="inspiration_gallery").style(grid=5, height='auto') + with gr.Column(scale=1): + types = gr.Dropdown(choices=types, value=types[0], label="Type", visible=len(types) > 1) + with gr.Row(): + source = gr.Dropdown(choices=["All", "Favorites", "Exclude abandoned", "Abandoned"], value="Exclude abandoned", label="Source") + get_inspiration = gr.Button("Get inspiration") + name = gr.Textbox(show_label=False, interactive=False) + with gr.Row(): + send_to_txt2img = gr.Button('to txt2img') + send_to_img2img = gr.Button('to img2img') + style_gallery = gr.Gallery(show_label=False, elem_id="inspiration_style_gallery").style(grid=2, height='auto') + + collect = gr.Button('Collect') + give_up = gr.Button("Don't show any more") + moveout = gr.Button("Move out", visible=False) + with gr.Row(): + select_button = gr.Button('set button', elem_id="inspiration_select_button") + name_list = gr.State(names) + source.change(source_change, inputs=[source], outputs=[moveout]) + get_inspiration.click(get_inspiration_images, inputs=[source, types], outputs=[inspiration_gallery, name_list]) + select_button.click(select_click, _js="inspiration_selected", inputs=[name, types, name_list], outputs=[name, style_gallery]) + give_up.click(give_up_click, inputs=[name, types], outputs=None) + collect.click(collect_click, inputs=[name, types], outputs=None) + return inspiration diff --git a/modules/shared.py b/modules/shared.py index faede821..ae033710 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -78,6 +78,7 @@ parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencode parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False) parser.add_argument("--api", action='store_true', help="use api=True to launch the api with the webui") parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the api instead of the webui") +parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI") cmd_opts = parser.parse_args() restricted_opts = [ diff --git a/modules/ui.py b/modules/ui.py index a2dbd41e..6a0a3c3b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -41,7 +41,8 @@ from modules import prompt_parser from modules.images import save_image import modules.textual_inversion.ui import modules.hypernetworks.ui -import modules.images_history as img_his +import modules.images_history as images_history +import modules.inspiration as inspiration # this is a fix for Windows users. 
Without it, javascript files will be served with text/html content-type and the browser will not show any UI mimetypes.init() @@ -1082,9 +1083,9 @@ def create_ui(wrap_gradio_gpu_call): upscaling_resize_w = gr.Number(label="Width", value=512, precision=0) upscaling_resize_h = gr.Number(label="Height", value=512, precision=0) upscaling_crop = gr.Checkbox(label='Crop to fit', value=True) - + with gr.Group(): - extras_upscaler_1 = gr.Radio(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") + extras_upscaler_1 = gr.Radio(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers] , value=shared.sd_upscalers[0].name, type="index") with gr.Group(): extras_upscaler_2 = gr.Radio(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") @@ -1178,7 +1179,8 @@ def create_ui(wrap_gradio_gpu_call): "i2i":img2img_paste_fields } - images_history = img_his.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) + browser_interface = images_history.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) + inspiration_interface = inspiration.ui(gr, opts) with gr.Blocks() as modelmerger_interface: with gr.Row().style(equal_height=False): @@ -1595,7 +1597,8 @@ Requested path was: {f} (img2img_interface, "img2img", "img2img"), (extras_interface, "Extras", "extras"), (pnginfo_interface, "PNG Info", "pnginfo"), - (images_history, "History", "images_history"), + (browser_interface, "History", "images_history"), + (inspiration_interface, "Inspiration", "inspiration"), (modelmerger_interface, "Checkpoint Merger", "modelmerger"), (train_interface, "Train", "ti"), (settings_interface, "Settings", "settings"), -- cgit v1.2.1 From 108be15500aac590b4e00420635d7b61fccfa530 Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Fri, 21 Oct 2022 01:00:41 +0900 Subject: fix bugs and optimizations --- modules/hypernetworks/hypernetwork.py | 105 +++++++++++++++++++--------------- 1 file changed, 59 insertions(+), 46 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 905cbeef..893ba110 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -36,14 +36,14 @@ class HypernetworkModule(torch.nn.Module): linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) # if skip_first_layer because first parameters potentially contain negative values # if i < 1: continue + if add_layer_norm: + linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) if activation_func in HypernetworkModule.activation_dict: linears.append(HypernetworkModule.activation_dict[activation_func]()) else: print("Invalid key {} encountered as activation function!".format(activation_func)) # if use_dropout: # linears.append(torch.nn.Dropout(p=0.3)) - if add_layer_norm: - linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) self.linear = torch.nn.Sequential(*linears) @@ -115,11 +115,24 @@ class Hypernetwork: for k, layers in self.layers.items(): for layer in layers: - layer.train() res += layer.trainables() return res + def eval(self): + for k, layers in self.layers.items(): + for layer in layers: + layer.eval() + for items in 
self.weights(): + items.requires_grad = False + + def train(self): + for k, layers in self.layers.items(): + for layer in layers: + layer.train() + for items in self.weights(): + items.requires_grad = True + def save(self, filename): state_dict = {} @@ -290,10 +303,6 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log shared.sd_model.first_stage_model.to(devices.cpu) hypernetwork = shared.loaded_hypernetwork - weights = hypernetwork.weights() - for weight in weights: - weight.requires_grad = True - losses = torch.zeros((32,)) last_saved_file = "" @@ -304,10 +313,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log return hypernetwork, filename scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) - # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc... - optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) + optimizer = torch.optim.AdamW(hypernetwork.weights(), lr=scheduler.learn_rate) pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) + hypernetwork.train() for i, entries in pbar: hypernetwork.step = i + ititial_step @@ -328,8 +337,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log losses[hypernetwork.step % losses.shape[0]] = loss.item() - optimizer.zero_grad() + optimizer.zero_grad(set_to_none=True) loss.backward() + del loss optimizer.step() mean_loss = losses.mean() if torch.isnan(mean_loss): @@ -346,44 +356,47 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log }) if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0: + torch.cuda.empty_cache() last_saved_image = os.path.join(images_dir, f'{hypernetwork_name}-{hypernetwork.step}.png') + with torch.no_grad(): + hypernetwork.eval() + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + ) - optimizer.zero_grad() - shared.sd_model.cond_stage_model.to(devices.device) - shared.sd_model.first_stage_model.to(devices.device) - - p = processing.StableDiffusionProcessingTxt2Img( - sd_model=shared.sd_model, - do_not_save_grid=True, - do_not_save_samples=True, - ) - - if preview_from_txt2img: - p.prompt = preview_prompt - p.negative_prompt = preview_negative_prompt - p.steps = preview_steps - p.sampler_index = preview_sampler_index - p.cfg_scale = preview_cfg_scale - p.seed = preview_seed - p.width = preview_width - p.height = preview_height - else: - p.prompt = entries[0].cond_text - p.steps = 20 - - preview_text = p.prompt - - processed = processing.process_images(p) - image = processed.images[0] if len(processed.images)>0 else None - - if unload: - shared.sd_model.cond_stage_model.to(devices.cpu) - shared.sd_model.first_stage_model.to(devices.cpu) - - if image is not None: - shared.state.current_image = image - image.save(last_saved_image) - last_saved_image += f", prompt: {preview_text}" + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entries[0].cond_text + p.steps = 20 + + preview_text = p.prompt + + processed = processing.process_images(p) + image = processed.images[0] if 
len(processed.images)>0 else None + + if unload: + shared.sd_model.cond_stage_model.to(devices.cpu) + shared.sd_model.first_stage_model.to(devices.cpu) + + if image is not None: + shared.state.current_image = image + image.save(last_saved_image) + last_saved_image += f", prompt: {preview_text}" + + hypernetwork.train() shared.state.job_no = hypernetwork.step -- cgit v1.2.1 From f89829ec3a0baceb445451ad98d4fb4323e922aa Mon Sep 17 00:00:00 2001 From: aria1th <35677394+aria1th@users.noreply.github.com> Date: Fri, 21 Oct 2022 01:37:11 +0900 Subject: Revert "fix bugs and optimizations" This reverts commit 108be15500aac590b4e00420635d7b61fccfa530. --- modules/hypernetworks/hypernetwork.py | 105 +++++++++++++++------------------- 1 file changed, 46 insertions(+), 59 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 893ba110..905cbeef 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -36,14 +36,14 @@ class HypernetworkModule(torch.nn.Module): linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) # if skip_first_layer because first parameters potentially contain negative values # if i < 1: continue - if add_layer_norm: - linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) if activation_func in HypernetworkModule.activation_dict: linears.append(HypernetworkModule.activation_dict[activation_func]()) else: print("Invalid key {} encountered as activation function!".format(activation_func)) # if use_dropout: # linears.append(torch.nn.Dropout(p=0.3)) + if add_layer_norm: + linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) self.linear = torch.nn.Sequential(*linears) @@ -115,24 +115,11 @@ class Hypernetwork: for k, layers in self.layers.items(): for layer in layers: + layer.train() res += layer.trainables() return res - def eval(self): - for k, layers in self.layers.items(): - for layer in layers: - layer.eval() - for items in self.weights(): - items.requires_grad = False - - def train(self): - for k, layers in self.layers.items(): - for layer in layers: - layer.train() - for items in self.weights(): - items.requires_grad = True - def save(self, filename): state_dict = {} @@ -303,6 +290,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log shared.sd_model.first_stage_model.to(devices.cpu) hypernetwork = shared.loaded_hypernetwork + weights = hypernetwork.weights() + for weight in weights: + weight.requires_grad = True + losses = torch.zeros((32,)) last_saved_file = "" @@ -313,10 +304,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log return hypernetwork, filename scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) - optimizer = torch.optim.AdamW(hypernetwork.weights(), lr=scheduler.learn_rate) + # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc... 
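If that comment were ever acted on, the natural shape is a small dispatch table rather than another if chain. A hedged sketch follows (not what the commit does; the next line simply hard-codes AdamW again):

    import torch

    OPTIMIZERS = {
        "Adam": torch.optim.Adam,
        "AdamW": torch.optim.AdamW,
        "SGD": torch.optim.SGD,
    }

    def make_optimizer(name, params, lr):
        # fail loudly instead of silently training with the wrong optimizer
        if name not in OPTIMIZERS:
            raise ValueError(f"unknown optimizer: {name}")
        return OPTIMIZERS[name](params, lr=lr)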
+ optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) - hypernetwork.train() for i, entries in pbar: hypernetwork.step = i + ititial_step @@ -337,9 +328,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log losses[hypernetwork.step % losses.shape[0]] = loss.item() - optimizer.zero_grad(set_to_none=True) + optimizer.zero_grad() loss.backward() - del loss optimizer.step() mean_loss = losses.mean() if torch.isnan(mean_loss): @@ -356,47 +346,44 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log }) if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0: - torch.cuda.empty_cache() last_saved_image = os.path.join(images_dir, f'{hypernetwork_name}-{hypernetwork.step}.png') - with torch.no_grad(): - hypernetwork.eval() - shared.sd_model.cond_stage_model.to(devices.device) - shared.sd_model.first_stage_model.to(devices.device) - - p = processing.StableDiffusionProcessingTxt2Img( - sd_model=shared.sd_model, - do_not_save_grid=True, - do_not_save_samples=True, - ) - if preview_from_txt2img: - p.prompt = preview_prompt - p.negative_prompt = preview_negative_prompt - p.steps = preview_steps - p.sampler_index = preview_sampler_index - p.cfg_scale = preview_cfg_scale - p.seed = preview_seed - p.width = preview_width - p.height = preview_height - else: - p.prompt = entries[0].cond_text - p.steps = 20 - - preview_text = p.prompt - - processed = processing.process_images(p) - image = processed.images[0] if len(processed.images)>0 else None - - if unload: - shared.sd_model.cond_stage_model.to(devices.cpu) - shared.sd_model.first_stage_model.to(devices.cpu) - - if image is not None: - shared.state.current_image = image - image.save(last_saved_image) - last_saved_image += f", prompt: {preview_text}" - - hypernetwork.train() + optimizer.zero_grad() + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + ) + + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entries[0].cond_text + p.steps = 20 + + preview_text = p.prompt + + processed = processing.process_images(p) + image = processed.images[0] if len(processed.images)>0 else None + + if unload: + shared.sd_model.cond_stage_model.to(devices.cpu) + shared.sd_model.first_stage_model.to(devices.cpu) + + if image is not None: + shared.state.current_image = image + image.save(last_saved_image) + last_saved_image += f", prompt: {preview_text}" shared.state.job_no = hypernetwork.step -- cgit v1.2.1 From bb0f1a2cdae3410a41d06ae878f56e29b8154c41 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Sat, 22 Oct 2022 01:23:00 +0800 Subject: inspiration finished --- modules/inspiration.py | 192 ++++++++++++++++++++++++++++++++----------------- modules/shared.py | 6 ++ modules/ui.py | 2 +- 3 files changed, 133 insertions(+), 67 deletions(-) (limited to 'modules') diff --git a/modules/inspiration.py b/modules/inspiration.py index 456bfcb5..f72ebf3a 100644 --- a/modules/inspiration.py +++ b/modules/inspiration.py @@ -1,122 +1,182 @@ import os import random -import gradio 
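The rewritten module below keeps its Favorites/Abandoned state as plain newline-separated name lists on disk. Stripped of the gradio wiring, that persistence pattern amounts to the sketch below (hypothetical names, standard library only); note that the rewrite path must re-append the trailing newline, which is precisely the moveout_click bug a later commit in this series fixes:

    import os

    def load_names(path):
        # one name per line; a missing file reads as an empty list
        if not os.path.exists(path):
            return []
        with open(path, "r") as f:
            return [line.rstrip("\n") for line in f if line.strip()]

    def add_name(path, name):
        # append-only and de-duplicated, like save_name_list
        if name not in load_names(path):
            with open(path, "a") as f:
                f.write(name + "\n")

    def remove_name(path, name):
        # rewrite the file without the given name, like moveout_click
        kept = [n for n in load_names(path) if n != name]
        with open(path, "w") as f:
            f.writelines(n + "\n" for n in kept)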
-inspiration_path = "inspiration" -inspiration_system_path = os.path.join(inspiration_path, "system") -def read_name_list(file): +import gradio +from modules.shared import opts +inspiration_system_path = os.path.join(opts.inspiration_dir, "system") +def read_name_list(file, types=None, keyword=None): if not os.path.exists(file): return [] - f = open(file, "r") ret = [] + f = open(file, "r") line = f.readline() while len(line) > 0: line = line.rstrip("\n") - ret.append(line) - print(ret) + if types is not None: + dirname = os.path.split(line) + if dirname[0] in types and keyword in dirname[1]: + ret.append(line) + else: + ret.append(line) + line = f.readline() return ret def save_name_list(file, name): - print(file) - f = open(file, "a") - f.write(name + "\n") + with open(file, "a") as f: + f.write(name + "\n") -def get_inspiration_images(source, types): - path = os.path.join(inspiration_path , types) +def get_types_list(): + files = os.listdir(opts.inspiration_dir) + types = [] + for x in files: + path = os.path.join(opts.inspiration_dir, x) + if x[0] == ".": + continue + if not os.path.isdir(path): + continue + if path == inspiration_system_path: + continue + types.append(x) + return types + +def get_inspiration_images(source, types, keyword): + get_num = int(opts.inspiration_rows_num * opts.inspiration_cols_num) if source == "Favorites": - names = read_name_list(os.path.join(inspiration_system_path, types + "_faverites.txt")) - names = random.sample(names, 25) + names = read_name_list(os.path.join(inspiration_system_path, "faverites.txt"), types, keyword) + names = random.sample(names, get_num) if len(names) > get_num else names elif source == "Abandoned": - names = read_name_list(os.path.join(inspiration_system_path, types + "_abondened.txt")) - names = random.sample(names, 25) - elif source == "Exclude abandoned": - abondened = read_name_list(os.path.join(inspiration_system_path, types + "_abondened.txt")) - all_names = os.listdir(path) - names = [] - while len(names) < 25: - name = random.choice(all_names) - if name not in abondened: - names.append(name) + names = read_name_list(os.path.join(inspiration_system_path, "abandoned.txt"), types, keyword) + print(names) + names = random.sample(names, get_num) if len(names) > get_num else names + elif source == "Exclude abandoned": + abandoned = read_name_list(os.path.join(inspiration_system_path, "abandoned.txt"), types, keyword) + all_names = [] + for tp in types: + name_list = os.listdir(os.path.join(opts.inspiration_dir, tp)) + all_names += [os.path.join(tp, x) for x in name_list if keyword in x] + + if len(all_names) > get_num: + names = [] + while len(names) < get_num: + name = random.choice(all_names) + if name not in abandoned: + names.append(name) + else: + names = all_names else: - names = random.sample(os.listdir(path), 25) - names = random.sample(names, 25) + all_names = [] + for tp in types: + name_list = os.listdir(os.path.join(opts.inspiration_dir, tp)) + all_names += [os.path.join(tp, x) for x in name_list if keyword in x] + names = random.sample(all_names, get_num) if len(all_names) > get_num else all_names image_list = [] for a in names: - image_path = os.path.join(path, a) + image_path = os.path.join(opts.inspiration_dir, a) images = os.listdir(image_path) - image_list.append(os.path.join(image_path, random.choice(images))) - return image_list, names + image_list.append((os.path.join(image_path, random.choice(images)), a)) + return image_list, names, "" -def select_click(index, types, name_list): +def select_click(index, 
name_list): name = name_list[int(index)] - path = os.path.join(inspiration_path, types, name) + path = os.path.join(opts.inspiration_dir, name) images = os.listdir(path) - return name, [os.path.join(path, x) for x in images] + return name, [os.path.join(path, x) for x in images], "" -def give_up_click(name, types): - file = os.path.join(inspiration_system_path, types + "_abandoned.txt") +def give_up_click(name): + file = os.path.join(inspiration_system_path, "abandoned.txt") name_list = read_name_list(file) if name not in name_list: save_name_list(file, name) + return "Added to abandoned list" -def collect_click(name, types): - file = os.path.join(inspiration_system_path, types + "_faverites.txt") - print(file) +def collect_click(name): + file = os.path.join(inspiration_system_path, "faverites.txt") name_list = read_name_list(file) - print(name_list) if name not in name_list: save_name_list(file, name) + return "Added to faverite list" -def moveout_click(name, types): - file = os.path.join(inspiration_system_path, types + "_faverites.txt") +def moveout_click(name, source): + if source == "Abandoned": + file = os.path.join(inspiration_system_path, "abandoned.txt") + if source == "Favorites": + file = os.path.join(inspiration_system_path, "faverites.txt") + else: + return None name_list = read_name_list(file) - if name not in name_list: - save_name_list(file, name) + os.remove(file) + with open(file, "a") as f: + for a in name_list: + if a != name: + f.write(a) + return "Moved out {name} from {source} list" def source_change(source): - if source == "Abandoned" or source == "Favorites": - return gradio.Button.update(visible=True, value=f"Move out {source}") + if source in ["Abandoned", "Favorites"]: + return gradio.update(visible=True), [] else: - return gradio.Button.update(visible=False) + return gradio.update(visible=False), [] +def add_to_prompt(name, prompt): + print(name, prompt) + name = os.path.basename(name) + return prompt + "," + name -def ui(gr, opts): +def ui(gr, opts, txt2img_prompt, img2img_prompt): with gr.Blocks(analytics_enabled=False) as inspiration: - flag = os.path.exists(inspiration_path) + flag = os.path.exists(opts.inspiration_dir) if flag: - types = os.listdir(inspiration_path) - types = [x for x in types if x != "system"] + types = get_types_list() flag = len(types) > 0 - if not flag: - os.mkdir(inspiration_path) + else: + os.makedirs(opts.inspiration_dir) + if not flag: gr.HTML(""" -
" +

+			To activate the inspiration function, you need to get "inspiration" images first.
+			You can create these images by running the "Create inspiration images" script in the txt2img page;
+			you can get the artists or art styles list from here:
+			https://github.com/pharmapsychotic/clip-interrogator/tree/main/data
+			Download these files, and select them in the "Create inspiration images" script UI.
+			There are about 6000 artists and art styles in these files,
+			so this takes several hours depending on your GPU type and how many pictures you generate for each artist/style;
+			I suggest at least four images for each.
+
+			You can also download generated pictures from here:
+			https://huggingface.co/datasets/yfszzx/inspiration
+			Unzip the file to the project directory of webui,
+			then restart webui and enjoy the joy of creation!
""") return inspiration if not os.path.exists(inspiration_system_path): os.mkdir(inspiration_system_path) - gallery, names = get_inspiration_images("Exclude abandoned", types[0]) with gr.Row(): with gr.Column(scale=2): - inspiration_gallery = gr.Gallery(gallery, show_label=False, elem_id="inspiration_gallery").style(grid=5, height='auto') + inspiration_gallery = gr.Gallery(show_label=False, elem_id="inspiration_gallery").style(grid=opts.inspiration_cols_num, height='auto') with gr.Column(scale=1): - types = gr.Dropdown(choices=types, value=types[0], label="Type", visible=len(types) > 1) + print(types) + types = gr.CheckboxGroup(choices=types, value=types) + keyword = gr.Textbox("", label="Key word") with gr.Row(): source = gr.Dropdown(choices=["All", "Favorites", "Exclude abandoned", "Abandoned"], value="Exclude abandoned", label="Source") - get_inspiration = gr.Button("Get inspiration") + get_inspiration = gr.Button("Get inspiration", elem_id="inspiration_get_button") name = gr.Textbox(show_label=False, interactive=False) with gr.Row(): send_to_txt2img = gr.Button('to txt2img') send_to_img2img = gr.Button('to img2img') - style_gallery = gr.Gallery(show_label=False, elem_id="inspiration_style_gallery").style(grid=2, height='auto') - + style_gallery = gr.Gallery(show_label=False).style(grid=2, height='auto') collect = gr.Button('Collect') - give_up = gr.Button("Don't show any more") + give_up = gr.Button("Don't show again") moveout = gr.Button("Move out", visible=False) - with gr.Row(): + warning = gr.HTML() + with gr.Row(visible=False): select_button = gr.Button('set button', elem_id="inspiration_select_button") - name_list = gr.State(names) - source.change(source_change, inputs=[source], outputs=[moveout]) - get_inspiration.click(get_inspiration_images, inputs=[source, types], outputs=[inspiration_gallery, name_list]) - select_button.click(select_click, _js="inspiration_selected", inputs=[name, types, name_list], outputs=[name, style_gallery]) - give_up.click(give_up_click, inputs=[name, types], outputs=None) - collect.click(collect_click, inputs=[name, types], outputs=None) + name_list = gr.State() + + get_inspiration.click(get_inspiration_images, inputs=[source, types, keyword], outputs=[inspiration_gallery, name_list, keyword]) + source.change(source_change, inputs=[source], outputs=[moveout, style_gallery]) + source.change(fn=None, _js="inspiration_click_get_button", inputs=None, outputs=None) + keyword.submit(fn=None, _js="inspiration_click_get_button", inputs=None, outputs=None) + select_button.click(select_click, _js="inspiration_selected", inputs=[name, name_list], outputs=[name, style_gallery, warning]) + give_up.click(give_up_click, inputs=[name], outputs=[warning]) + collect.click(collect_click, inputs=[name], outputs=[warning]) + moveout.click(moveout_click, inputs=[name, source], outputs=[warning]) + send_to_txt2img.click(add_to_prompt, inputs=[name, txt2img_prompt], outputs=[txt2img_prompt]) + send_to_img2img.click(add_to_prompt, inputs=[name, img2img_prompt], outputs=[img2img_prompt]) + send_to_txt2img.click(None, _js='switch_to_txt2img', inputs=None, outputs=None) + send_to_img2img.click(None, _js="switch_to_img2img_img2img", inputs=None, outputs=None) return inspiration diff --git a/modules/shared.py b/modules/shared.py index ae033710..564b1b8d 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -316,6 +316,12 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, 
{"precision": 0}), })) +options_templates.update(options_section(('inspiration', "Inspiration"), { + "inspiration_dir": OptionInfo("inspiration", "Directory of inspiration", component_args=hide_dirs), + "inspiration_max_samples": OptionInfo(4, "Maximum number of samples, used to determine which folders to skip when continue running the create script", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}), + "inspiration_rows_num": OptionInfo(4, "Rows of inspiration interface frame", gr.Slider, {"minimum": 4, "maximum": 16, "step": 1}), + "inspiration_cols_num": OptionInfo(8, "Columns of inspiration interface frame", gr.Slider, {"minimum": 4, "maximum": 16, "step": 1}), +})) class Options: data = None diff --git a/modules/ui.py b/modules/ui.py index 6a0a3c3b..b651eb9c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1180,7 +1180,7 @@ def create_ui(wrap_gradio_gpu_call): } browser_interface = images_history.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) - inspiration_interface = inspiration.ui(gr, opts) + inspiration_interface = inspiration.ui(gr, opts, txt2img_prompt, img2img_prompt) with gr.Blocks() as modelmerger_interface: with gr.Row().style(equal_height=False): -- cgit v1.2.1 From 57eb54b838faa383c10079e1bb5471b7bee6a695 Mon Sep 17 00:00:00 2001 From: Extraltodeus Date: Sat, 22 Oct 2022 00:11:07 +0200 Subject: implement CUDA device selection by ID --- modules/devices.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/devices.py b/modules/devices.py index eb422583..8a159282 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -1,7 +1,6 @@ +import sys, os, shlex import contextlib - import torch - from modules import errors # has_mps is only available in nightly pytorch (for now), `getattr` for compatibility @@ -9,10 +8,26 @@ has_mps = getattr(torch, 'has_mps', False) cpu = torch.device("cpu") +def extract_device_id(args, name): + for x in range(len(args)): + if name in args[x]: return args[x+1] + return None def get_optimal_device(): if torch.cuda.is_available(): - return torch.device("cuda") + # CUDA device selection support: + if "shared" not in sys.modules: + commandline_args = os.environ.get('COMMANDLINE_ARGS', "") #re-parse the commandline arguments because using the shared.py module creates an import loop. 
+ sys.argv += shlex.split(commandline_args) + device_id = extract_device_id(sys.argv, '--device-id') + else: + device_id = shared.cmd_opts.device_id + + if device_id is not None: + cuda_device = f"cuda:{device_id}" + return torch.device(cuda_device) + else: + return torch.device("cuda") if has_mps: return torch.device("mps") -- cgit v1.2.1 From 29bfacd63cb5c73b9643d94f255cca818fd49d9c Mon Sep 17 00:00:00 2001 From: Extraltodeus Date: Sat, 22 Oct 2022 00:12:46 +0200 Subject: implement CUDA device selection, --device-id arg --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 41d7f08e..03032a47 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -80,6 +80,7 @@ parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencode parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False) parser.add_argument("--api", action='store_true', help="use api=True to launch the api with the webui") parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the api instead of the webui") +parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None) cmd_opts = parser.parse_args() restricted_opts = [ -- cgit v1.2.1 From 40ddb6df61564684263c7442bacf61efe3882b87 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Sat, 22 Oct 2022 10:16:22 +0800 Subject: inspiration perfected --- modules/inspiration.py | 71 +++++++++++++++++++++++++++----------------------- 1 file changed, 39 insertions(+), 32 deletions(-) (limited to 'modules') diff --git a/modules/inspiration.py b/modules/inspiration.py index f72ebf3a..319183ab 100644 --- a/modules/inspiration.py +++ b/modules/inspiration.py @@ -13,7 +13,7 @@ def read_name_list(file, types=None, keyword=None): line = line.rstrip("\n") if types is not None: dirname = os.path.split(line) - if dirname[0] in types and keyword in dirname[1]: + if dirname[0] in types and keyword in dirname[1].lower(): ret.append(line) else: ret.append(line) @@ -21,8 +21,10 @@ def read_name_list(file, types=None, keyword=None): return ret def save_name_list(file, name): - with open(file, "a") as f: - f.write(name + "\n") + name_list = read_name_list(file) + if name not in name_list: + with open(file, "a") as f: + f.write(name + "\n") def get_types_list(): files = os.listdir(opts.inspiration_dir) @@ -39,20 +41,20 @@ def get_types_list(): return types def get_inspiration_images(source, types, keyword): + keyword = keyword.strip(" ").lower() get_num = int(opts.inspiration_rows_num * opts.inspiration_cols_num) if source == "Favorites": names = read_name_list(os.path.join(inspiration_system_path, "faverites.txt"), types, keyword) names = random.sample(names, get_num) if len(names) > get_num else names elif source == "Abandoned": names = read_name_list(os.path.join(inspiration_system_path, "abandoned.txt"), types, keyword) - print(names) names = random.sample(names, get_num) if len(names) > get_num else names elif source == "Exclude abandoned": abandoned = read_name_list(os.path.join(inspiration_system_path, "abandoned.txt"), types, keyword) all_names = [] for tp in types: name_list = os.listdir(os.path.join(opts.inspiration_dir, tp)) - all_names += [os.path.join(tp, x) for x in name_list if keyword in x] + all_names += [os.path.join(tp, x) for x in name_list if keyword in x.lower()] if 
len(all_names) > get_num: names = [] @@ -66,14 +68,14 @@ def get_inspiration_images(source, types, keyword): all_names = [] for tp in types: name_list = os.listdir(os.path.join(opts.inspiration_dir, tp)) - all_names += [os.path.join(tp, x) for x in name_list if keyword in x] + all_names += [os.path.join(tp, x) for x in name_list if keyword in x.lower()] names = random.sample(all_names, get_num) if len(all_names) > get_num else all_names image_list = [] for a in names: image_path = os.path.join(opts.inspiration_dir, a) images = os.listdir(image_path) image_list.append((os.path.join(image_path, random.choice(images)), a)) - return image_list, names, "" + return image_list, names def select_click(index, name_list): name = name_list[int(index)] @@ -83,22 +85,18 @@ def select_click(index, name_list): def give_up_click(name): file = os.path.join(inspiration_system_path, "abandoned.txt") - name_list = read_name_list(file) - if name not in name_list: - save_name_list(file, name) + save_name_list(file, name) return "Added to abandoned list" def collect_click(name): file = os.path.join(inspiration_system_path, "faverites.txt") - name_list = read_name_list(file) - if name not in name_list: - save_name_list(file, name) + save_name_list(file, name) return "Added to faverite list" def moveout_click(name, source): if source == "Abandoned": file = os.path.join(inspiration_system_path, "abandoned.txt") - if source == "Favorites": + elif source == "Favorites": file = os.path.join(inspiration_system_path, "faverites.txt") else: return None @@ -107,8 +105,8 @@ def moveout_click(name, source): with open(file, "a") as f: for a in name_list: if a != name: - f.write(a) - return "Moved out {name} from {source} list" + f.write(a + "\n") + return f"Moved out {name} from {source} list" def source_change(source): if source in ["Abandoned", "Favorites"]: @@ -116,10 +114,12 @@ def source_change(source): else: return gradio.update(visible=False), [] def add_to_prompt(name, prompt): - print(name, prompt) name = os.path.basename(name) return prompt + "," + name +def clear_keyword(): + return "" + def ui(gr, opts, txt2img_prompt, img2img_prompt): with gr.Blocks(analytics_enabled=False) as inspiration: flag = os.path.exists(opts.inspiration_dir) @@ -132,15 +132,15 @@ def ui(gr, opts, txt2img_prompt, img2img_prompt): gr.HTML("""

To activate the inspiration function, you need to get "inspiration" images first.


You can create these images by running the "Create inspiration images" script on the txt2img page;
you can get the list of artists or art styles from here:
- https://github.com/pharmapsychotic/clip-interrogator/tree/main/data
+ https://github.com/pharmapsychotic/clip-interrogator/tree/main/data
download these files, then select them in the "Create inspiration images" script UI.
There are about 6,000 artists and art styles in these files.
This takes several hours, depending on your GPU type and how many pictures you generate for each artist/style;
I suggest at least four images for each.


You can also download generated pictures from here:


- https://huggingface.co/datasets/yfszzx/inspiration
+ https://huggingface.co/datasets/yfszzx/inspiration
unzip the file into the webui project directory,
then restart webui and enjoy the joy of creation!
- """) + """) return inspiration if not os.path.exists(inspiration_system_path): os.mkdir(inspiration_system_path) @@ -148,35 +148,42 @@ def ui(gr, opts, txt2img_prompt, img2img_prompt): with gr.Column(scale=2): inspiration_gallery = gr.Gallery(show_label=False, elem_id="inspiration_gallery").style(grid=opts.inspiration_cols_num, height='auto') with gr.Column(scale=1): - print(types) types = gr.CheckboxGroup(choices=types, value=types) - keyword = gr.Textbox("", label="Key word") - with gr.Row(): - source = gr.Dropdown(choices=["All", "Favorites", "Exclude abandoned", "Abandoned"], value="Exclude abandoned", label="Source") - get_inspiration = gr.Button("Get inspiration", elem_id="inspiration_get_button") - name = gr.Textbox(show_label=False, interactive=False) with gr.Row(): + source = gr.Dropdown(choices=["All", "Favorites", "Exclude abandoned", "Abandoned"], value="Exclude abandoned", label="Source") + keyword = gr.Textbox("", label="Key word") + get_inspiration = gr.Button("Get inspiration", elem_id="inspiration_get_button") + name = gr.Textbox(show_label=False, interactive=False) + with gr.Row(): send_to_txt2img = gr.Button('to txt2img') send_to_img2img = gr.Button('to img2img') style_gallery = gr.Gallery(show_label=False).style(grid=2, height='auto') - collect = gr.Button('Collect') - give_up = gr.Button("Don't show again") - moveout = gr.Button("Move out", visible=False) warning = gr.HTML() + with gr.Row(): + collect = gr.Button('Collect') + give_up = gr.Button("Don't show again") + moveout = gr.Button("Move out", visible=False) + with gr.Row(visible=False): select_button = gr.Button('set button', elem_id="inspiration_select_button") name_list = gr.State() - get_inspiration.click(get_inspiration_images, inputs=[source, types, keyword], outputs=[inspiration_gallery, name_list, keyword]) - source.change(source_change, inputs=[source], outputs=[moveout, style_gallery]) - source.change(fn=None, _js="inspiration_click_get_button", inputs=None, outputs=None) + get_inspiration.click(get_inspiration_images, inputs=[source, types, keyword], outputs=[inspiration_gallery, name_list]) keyword.submit(fn=None, _js="inspiration_click_get_button", inputs=None, outputs=None) + source.change(source_change, inputs=[source], outputs=[moveout, style_gallery]) + source.change(fn=clear_keyword, _js="inspiration_click_get_button", inputs=None, outputs=[keyword]) + types.change(fn=clear_keyword, _js="inspiration_click_get_button", inputs=None, outputs=[keyword]) + select_button.click(select_click, _js="inspiration_selected", inputs=[name, name_list], outputs=[name, style_gallery, warning]) give_up.click(give_up_click, inputs=[name], outputs=[warning]) collect.click(collect_click, inputs=[name], outputs=[warning]) moveout.click(moveout_click, inputs=[name, source], outputs=[warning]) + moveout.click(fn=None, _js="inspiration_click_get_button", inputs=None, outputs=None) + send_to_txt2img.click(add_to_prompt, inputs=[name, txt2img_prompt], outputs=[txt2img_prompt]) send_to_img2img.click(add_to_prompt, inputs=[name, img2img_prompt], outputs=[img2img_prompt]) + send_to_txt2img.click(collect_click, inputs=[name], outputs=[warning]) + send_to_img2img.click(collect_click, inputs=[name], outputs=[warning]) send_to_txt2img.click(None, _js='switch_to_txt2img', inputs=None, outputs=None) send_to_img2img.click(None, _js="switch_to_img2img_img2img", inputs=None, outputs=None) return inspiration -- cgit v1.2.1 From d93ea5cdeb2fd3607b7265271ccab2c9bf4c1156 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Sat, 22 Oct 2022 
10:21:21 +0800 Subject: inspiration perfected --- modules/inspiration.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/inspiration.py b/modules/inspiration.py index 319183ab..94ff139a 100644 --- a/modules/inspiration.py +++ b/modules/inspiration.py @@ -73,8 +73,11 @@ def get_inspiration_images(source, types, keyword): image_list = [] for a in names: image_path = os.path.join(opts.inspiration_dir, a) - images = os.listdir(image_path) - image_list.append((os.path.join(image_path, random.choice(images)), a)) + images = os.listdir(image_path) + if len(images) > 0: + image_list.append((os.path.join(image_path, random.choice(images)), a)) + else: + print(image_path) return image_list, names def select_click(index, name_list): -- cgit v1.2.1 From 67b78f0ea6f196bfdca49932da062631bb40d0b1 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Sat, 22 Oct 2022 10:29:23 +0800 Subject: inspiration perfected --- modules/inspiration.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/inspiration.py b/modules/inspiration.py index 94ff139a..29cf8297 100644 --- a/modules/inspiration.py +++ b/modules/inspiration.py @@ -160,12 +160,13 @@ def ui(gr, opts, txt2img_prompt, img2img_prompt): with gr.Row(): send_to_txt2img = gr.Button('to txt2img') send_to_img2img = gr.Button('to img2img') - style_gallery = gr.Gallery(show_label=False).style(grid=2, height='auto') - warning = gr.HTML() - with gr.Row(): collect = gr.Button('Collect') give_up = gr.Button("Don't show again") - moveout = gr.Button("Move out", visible=False) + moveout = gr.Button("Move out", visible=False) + warning = gr.HTML() + style_gallery = gr.Gallery(show_label=False).style(grid=2, height='auto') + + with gr.Row(visible=False): select_button = gr.Button('set button', elem_id="inspiration_select_button") -- cgit v1.2.1 From 2b91251637078e04472c91a06a8d9c4db9c1dcf0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 22 Oct 2022 12:23:45 +0300 Subject: removed aesthetic gradients as built-in added support for extensions --- modules/aesthetic_clip.py | 241 -------------------------------------------- modules/images_history.py | 2 +- modules/img2img.py | 5 +- modules/processing.py | 35 ++++--- modules/script_callbacks.py | 42 ++++++++ modules/scripts.py | 210 ++++++++++++++++++++++++++++---------- modules/sd_hijack.py | 1 - modules/sd_models.py | 7 +- modules/shared.py | 19 ---- modules/txt2img.py | 5 +- modules/ui.py | 83 +++------------ 11 files changed, 244 insertions(+), 406 deletions(-) delete mode 100644 modules/aesthetic_clip.py create mode 100644 modules/script_callbacks.py (limited to 'modules') diff --git a/modules/aesthetic_clip.py b/modules/aesthetic_clip.py deleted file mode 100644 index 8c828541..00000000 --- a/modules/aesthetic_clip.py +++ /dev/null @@ -1,241 +0,0 @@ -import copy -import itertools -import os -from pathlib import Path -import html -import gc - -import gradio as gr -import torch -from PIL import Image -from torch import optim - -from modules import shared -from transformers import CLIPModel, CLIPProcessor, CLIPTokenizer -from tqdm.auto import tqdm, trange -from modules.shared import opts, device - - -def get_all_images_in_folder(folder): - return [os.path.join(folder, f) for f in os.listdir(folder) if - os.path.isfile(os.path.join(folder, f)) and check_is_valid_image_file(f)] - - -def check_is_valid_image_file(filename): - return filename.lower().endswith(('.png', '.jpg', '.jpeg', ".gif", ".tiff", 
".webp")) - - -def batched(dataset, total, n=1): - for ndx in range(0, total, n): - yield [dataset.__getitem__(i) for i in range(ndx, min(ndx + n, total))] - - -def iter_to_batched(iterable, n=1): - it = iter(iterable) - while True: - chunk = tuple(itertools.islice(it, n)) - if not chunk: - return - yield chunk - - -def create_ui(): - import modules.ui - - with gr.Group(): - with gr.Accordion("Open for Clip Aesthetic!", open=False): - with gr.Row(): - aesthetic_weight = gr.Slider(minimum=0, maximum=1, step=0.01, label="Aesthetic weight", - value=0.9) - aesthetic_steps = gr.Slider(minimum=0, maximum=50, step=1, label="Aesthetic steps", value=5) - - with gr.Row(): - aesthetic_lr = gr.Textbox(label='Aesthetic learning rate', - placeholder="Aesthetic learning rate", value="0.0001") - aesthetic_slerp = gr.Checkbox(label="Slerp interpolation", value=False) - aesthetic_imgs = gr.Dropdown(sorted(shared.aesthetic_embeddings.keys()), - label="Aesthetic imgs embedding", - value="None") - - modules.ui.create_refresh_button(aesthetic_imgs, shared.update_aesthetic_embeddings, lambda: {"choices": sorted(shared.aesthetic_embeddings.keys())}, "refresh_aesthetic_embeddings") - - with gr.Row(): - aesthetic_imgs_text = gr.Textbox(label='Aesthetic text for imgs', - placeholder="This text is used to rotate the feature space of the imgs embs", - value="") - aesthetic_slerp_angle = gr.Slider(label='Slerp angle', minimum=0, maximum=1, step=0.01, - value=0.1) - aesthetic_text_negative = gr.Checkbox(label="Is negative text", value=False) - - return aesthetic_weight, aesthetic_steps, aesthetic_lr, aesthetic_slerp, aesthetic_imgs, aesthetic_imgs_text, aesthetic_slerp_angle, aesthetic_text_negative - - -aesthetic_clip_model = None - - -def aesthetic_clip(): - global aesthetic_clip_model - - if aesthetic_clip_model is None or aesthetic_clip_model.name_or_path != shared.sd_model.cond_stage_model.wrapped.transformer.name_or_path: - aesthetic_clip_model = CLIPModel.from_pretrained(shared.sd_model.cond_stage_model.wrapped.transformer.name_or_path) - aesthetic_clip_model.cpu() - - return aesthetic_clip_model - - -def generate_imgs_embd(name, folder, batch_size): - model = aesthetic_clip().to(device) - processor = CLIPProcessor.from_pretrained(model.name_or_path) - - with torch.no_grad(): - embs = [] - for paths in tqdm(iter_to_batched(get_all_images_in_folder(folder), batch_size), - desc=f"Generating embeddings for {name}"): - if shared.state.interrupted: - break - inputs = processor(images=[Image.open(path) for path in paths], return_tensors="pt").to(device) - outputs = model.get_image_features(**inputs).cpu() - embs.append(torch.clone(outputs)) - inputs.to("cpu") - del inputs, outputs - - embs = torch.cat(embs, dim=0).mean(dim=0, keepdim=True) - - # The generated embedding will be located here - path = str(Path(shared.cmd_opts.aesthetic_embeddings_dir) / f"{name}.pt") - torch.save(embs, path) - - model.cpu() - del processor - del embs - gc.collect() - torch.cuda.empty_cache() - res = f""" - Done generating embedding for {name}! 
- Aesthetic embedding saved to {html.escape(path)} - """ - shared.update_aesthetic_embeddings() - return gr.Dropdown.update(choices=sorted(shared.aesthetic_embeddings.keys()), label="Imgs embedding", - value="None"), \ - gr.Dropdown.update(choices=sorted(shared.aesthetic_embeddings.keys()), - label="Imgs embedding", - value="None"), res, "" - - -def slerp(low, high, val): - low_norm = low / torch.norm(low, dim=1, keepdim=True) - high_norm = high / torch.norm(high, dim=1, keepdim=True) - omega = torch.acos((low_norm * high_norm).sum(1)) - so = torch.sin(omega) - res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high - return res - - -class AestheticCLIP: - def __init__(self): - self.skip = False - self.aesthetic_steps = 0 - self.aesthetic_weight = 0 - self.aesthetic_lr = 0 - self.slerp = False - self.aesthetic_text_negative = "" - self.aesthetic_slerp_angle = 0 - self.aesthetic_imgs_text = "" - - self.image_embs_name = None - self.image_embs = None - self.load_image_embs(None) - - def set_aesthetic_params(self, p, aesthetic_lr=0, aesthetic_weight=0, aesthetic_steps=0, image_embs_name=None, - aesthetic_slerp=True, aesthetic_imgs_text="", - aesthetic_slerp_angle=0.15, - aesthetic_text_negative=False): - self.aesthetic_imgs_text = aesthetic_imgs_text - self.aesthetic_slerp_angle = aesthetic_slerp_angle - self.aesthetic_text_negative = aesthetic_text_negative - self.slerp = aesthetic_slerp - self.aesthetic_lr = aesthetic_lr - self.aesthetic_weight = aesthetic_weight - self.aesthetic_steps = aesthetic_steps - self.load_image_embs(image_embs_name) - - if self.image_embs_name is not None: - p.extra_generation_params.update({ - "Aesthetic LR": aesthetic_lr, - "Aesthetic weight": aesthetic_weight, - "Aesthetic steps": aesthetic_steps, - "Aesthetic embedding": self.image_embs_name, - "Aesthetic slerp": aesthetic_slerp, - "Aesthetic text": aesthetic_imgs_text, - "Aesthetic text negative": aesthetic_text_negative, - "Aesthetic slerp angle": aesthetic_slerp_angle, - }) - - def set_skip(self, skip): - self.skip = skip - - def load_image_embs(self, image_embs_name): - if image_embs_name is None or len(image_embs_name) == 0 or image_embs_name == "None": - image_embs_name = None - self.image_embs_name = None - if image_embs_name is not None and self.image_embs_name != image_embs_name: - self.image_embs_name = image_embs_name - self.image_embs = torch.load(shared.aesthetic_embeddings[self.image_embs_name], map_location=device) - self.image_embs /= self.image_embs.norm(dim=-1, keepdim=True) - self.image_embs.requires_grad_(False) - - def __call__(self, z, remade_batch_tokens): - if not self.skip and self.aesthetic_steps != 0 and self.aesthetic_lr != 0 and self.aesthetic_weight != 0 and self.image_embs_name is not None: - tokenizer = shared.sd_model.cond_stage_model.tokenizer - if not opts.use_old_emphasis_implementation: - remade_batch_tokens = [ - [tokenizer.bos_token_id] + x[:75] + [tokenizer.eos_token_id] for x in - remade_batch_tokens] - - tokens = torch.asarray(remade_batch_tokens).to(device) - - model = copy.deepcopy(aesthetic_clip()).to(device) - model.requires_grad_(True) - if self.aesthetic_imgs_text is not None and len(self.aesthetic_imgs_text) > 0: - text_embs_2 = model.get_text_features( - **tokenizer([self.aesthetic_imgs_text], padding=True, return_tensors="pt").to(device)) - if self.aesthetic_text_negative: - text_embs_2 = self.image_embs - text_embs_2 - text_embs_2 /= text_embs_2.norm(dim=-1, keepdim=True) - img_embs = 
slerp(self.image_embs, text_embs_2, self.aesthetic_slerp_angle) - else: - img_embs = self.image_embs - - with torch.enable_grad(): - - # We optimize the model to maximize the similarity - optimizer = optim.Adam( - model.text_model.parameters(), lr=self.aesthetic_lr - ) - - for _ in trange(self.aesthetic_steps, desc="Aesthetic optimization"): - text_embs = model.get_text_features(input_ids=tokens) - text_embs = text_embs / text_embs.norm(dim=-1, keepdim=True) - sim = text_embs @ img_embs.T - loss = -sim - optimizer.zero_grad() - loss.mean().backward() - optimizer.step() - - zn = model.text_model(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers) - if opts.CLIP_stop_at_last_layers > 1: - zn = zn.hidden_states[-opts.CLIP_stop_at_last_layers] - zn = model.text_model.final_layer_norm(zn) - else: - zn = zn.last_hidden_state - model.cpu() - del model - gc.collect() - torch.cuda.empty_cache() - zn = torch.concat([zn[77 * i:77 * (i + 1)] for i in range(max(z.shape[1] // 77, 1))], 1) - if self.slerp: - z = slerp(z, zn, self.aesthetic_weight) - else: - z = z * (1 - self.aesthetic_weight) + zn * self.aesthetic_weight - - return z diff --git a/modules/images_history.py b/modules/images_history.py index 78fd0543..bc5cf11f 100644 --- a/modules/images_history.py +++ b/modules/images_history.py @@ -310,7 +310,7 @@ def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): forward = gr.Button('Prev batch') backward = gr.Button('Next batch') with gr.Column(scale=3): - load_info = gr.HTML(visible=not custom_dir) + load_info = gr.HTML(visible=not custom_dir) with gr.Row(visible=False) as warning: warning_box = gr.Textbox("Message", interactive=False) diff --git a/modules/img2img.py b/modules/img2img.py index eea5199b..8d9f7cf9 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -56,7 +56,7 @@ def process_batch(p, input_dir, output_dir, args): processed_image.save(os.path.join(output_dir, filename)) -def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, aesthetic_lr=0, aesthetic_weight=0, aesthetic_steps=0, aesthetic_imgs=None, aesthetic_slerp=False, aesthetic_imgs_text="", aesthetic_slerp_angle=0.15, aesthetic_text_negative=False, *args): +def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args): is_inpaint = mode == 1 is_batch = mode == 2 @@ -109,7 +109,8 @@ 
def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
         inpainting_mask_invert=inpainting_mask_invert,
     )

-    shared.aesthetic_clip.set_aesthetic_params(p, float(aesthetic_lr), float(aesthetic_weight), int(aesthetic_steps), aesthetic_imgs, aesthetic_slerp, aesthetic_imgs_text, aesthetic_slerp_angle, aesthetic_text_negative)
+    p.scripts = modules.scripts.scripts_img2img
+    p.script_args = args

     if shared.cmd_opts.enable_console_prompts:
         print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
diff --git a/modules/processing.py b/modules/processing.py
index ff1ec4c9..372489f7 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -104,6 +104,12 @@ class StableDiffusionProcessing():
         self.seed_resize_from_h = 0
         self.seed_resize_from_w = 0

+        self.scripts = None
+        self.script_args = None
+        self.all_prompts = None
+        self.all_seeds = None
+        self.all_subseeds = None
+
     def init(self, all_prompts, all_seeds, all_subseeds):
         pass

@@ -350,32 +356,35 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
     shared.prompt_styles.apply_styles(p)

     if type(p.prompt) == list:
-        all_prompts = p.prompt
+        p.all_prompts = p.prompt
     else:
-        all_prompts = p.batch_size * p.n_iter * [p.prompt]
+        p.all_prompts = p.batch_size * p.n_iter * [p.prompt]

     if type(seed) == list:
-        all_seeds = seed
+        p.all_seeds = seed
     else:
-        all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(all_prompts))]
+        p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]

     if type(subseed) == list:
-        all_subseeds = subseed
+        p.all_subseeds = subseed
     else:
-        all_subseeds = [int(subseed) + x for x in range(len(all_prompts))]
+        p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]

     def infotext(iteration=0, position_in_batch=0):
-        return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch)
+        return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch)

     if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
         model_hijack.embedding_db.load_textual_inversion_embeddings()

+    if p.scripts is not None:
+        p.scripts.run_alwayson_scripts(p)
+
     infotexts = []
     output_images = []

     with torch.no_grad(), p.sd_model.ema_scope():
         with devices.autocast():
-            p.init(all_prompts, all_seeds, all_subseeds)
+            p.init(p.all_prompts, p.all_seeds, p.all_subseeds)

         if state.job_count == -1:
             state.job_count = p.n_iter
@@ -387,9 +396,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
             if state.interrupted:
                 break

-            prompts = all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
-            seeds = all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
-            subseeds = all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
+            prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+            seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
+            subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]

             if (len(prompts) == 0):
                 break
@@ -490,10 +499,10 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
             index_of_first_image = 1

         if opts.grid_save:
-            images.save_image(grid, p.outpath_grids, "grid", all_seeds[0], all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)
+            images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)

     devices.torch_gc()
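A note on the mechanism this commit introduces: the UI collects the controls of every script into one flat tuple, txt2img/img2img store that tuple on the processing object as p.script_args, and run_alwayson_scripts (shown further down in this commit) hands each always-on script only its own slice, bounded by its args_from/args_to indices. A minimal, self-contained sketch of that flow; every name in it is an illustrative toy, not part of the codebase:

class ToyScript:
    def __init__(self, name, args_from, args_to):
        self.name, self.args_from, self.args_to = name, args_from, args_to

    def process(self, p, *args):
        # each script sees only the values produced by its own UI controls
        print(f"{self.name} received {args}")

class ToyProcessing:
    # flat tuple collected from the controls of *all* scripts, in UI order
    script_args = ("blur", 3, True, 0.5)

p = ToyProcessing()
for script in (ToyScript("scriptA", 0, 2), ToyScript("scriptB", 2, 4)):
    script.process(p, *p.script_args[script.args_from:script.args_to])
# prints: scriptA received ('blur', 3)
#         scriptB received (True, 0.5)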
-    return Processed(p, output_images, all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=all_subseeds[0], all_prompts=all_prompts, all_seeds=all_seeds, all_subseeds=all_subseeds, index_of_first_image=index_of_first_image, infotexts=infotexts)
+    return Processed(p, output_images, p.all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], all_prompts=p.all_prompts, all_seeds=p.all_seeds, all_subseeds=p.all_subseeds, index_of_first_image=index_of_first_image, infotexts=infotexts)


 class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
new file mode 100644
index 00000000..866b7acd
--- /dev/null
+++ b/modules/script_callbacks.py
@@ -0,0 +1,42 @@
+
+callbacks_model_loaded = []
+callbacks_ui_tabs = []
+
+
+def clear_callbacks():
+    callbacks_model_loaded.clear()
+    callbacks_ui_tabs.clear()
+
+
+def model_loaded_callback(sd_model):
+    for callback in callbacks_model_loaded:
+        callback(sd_model)
+
+
+def ui_tabs_callback():
+    res = []
+
+    for callback in callbacks_ui_tabs:
+        res += callback() or []
+
+    return res
+
+
+def on_model_loaded(callback):
+    """register a function to be called when the stable diffusion model is created; the model is
+    passed as an argument"""
+    callbacks_model_loaded.append(callback)
+
+
+def on_ui_tabs(callback):
+    """register a function to be called when the UI is creating new tabs.
+    The function must either return None, which means no new tabs are to be added, or a list, where
+    each element is a tuple:
+        (gradio_component, title, elem_id)
+
+    gradio_component is a gradio component to be used for contents of the tab (usually gr.Blocks)
+    title is tab text displayed to user in the UI
+    elem_id is HTML id for the tab
+    """
+    callbacks_ui_tabs.append(callback)
+
diff --git a/modules/scripts.py b/modules/scripts.py
index 1039fa9c..65f25f49 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -1,86 +1,153 @@
 import os
 import sys
 import traceback
+from collections import namedtuple

 import modules.ui as ui
 import gradio as gr

 from modules.processing import StableDiffusionProcessing
-from modules import shared
+from modules import shared, paths, script_callbacks
+
+AlwaysVisible = object()
+

 class Script:
     filename = None
     args_from = None
     args_to = None
+    alwayson = False
+
+    infotext_fields = None
+    """if set in ui(), this is a list of pairs of gradio component + text; the text will be used when
+    parsing infotext to set the value for the component; see ui.py's txt2img_paste_fields for an example
+    """

-    # The title of the script. This is what will be displayed in the dropdown menu.
     def title(self):
+        """this function should return the title of the script. This is what will be displayed in the dropdown menu."""
+
         raise NotImplementedError()

-    # How the script is displayed in the UI. See https://gradio.app/docs/#components
-    # for the different UI components you can use and how to create them.
-    # Most UI components can return a value, such as a boolean for a checkbox.
-    # The returned values are passed to the run method as parameters.
     def ui(self, is_img2img):
+        """this function should create gradio UI elements. See https://gradio.app/docs/#components
+        The return value should be an array of all components that are used in processing.
+        Values of those returned components will be passed to run() and process() functions.
+        """
+
         pass

-    # Determines when the script should be shown in the dropdown menu via the
-    # returned value.
As an example:
-    # is_img2img is True if the current tab is img2img, and False if it is txt2img.
-    # Thus, return is_img2img to only show the script on the img2img tab.
     def show(self, is_img2img):
+        """
+        is_img2img is True if this function is called for the img2img interface, and False otherwise
+
+        This function should return:
+         - False if the script should not be shown in UI at all
+         - True if the script should be shown in UI if it's selected in the scripts dropdown
+         - script.AlwaysVisible if the script should be shown in UI at all times
+        """
+
         return True

-    # This is where the additional processing is implemented. The parameters include
-    # self, the model object "p" (a StableDiffusionProcessing class, see
-    # processing.py), and the parameters returned by the ui method.
-    # Custom functions can be defined here, and additional libraries can be imported
-    # to be used in processing. The return value should be a Processed object, which is
-    # what is returned by the process_images method.
-    def run(self, *args):
+    def run(self, p, *args):
+        """
+        This function is called if the script has been selected in the script dropdown.
+        It must do all processing and return the Processed object with results, same as
+        one returned by processing.process_images.
+
+        Usually the processing is done by calling the processing.process_images function.
+
+        args contains all values returned by components from ui()
+        """
+
        raise NotImplementedError()

-    # The description method is currently unused.
-    # To add a description that appears when hovering over the title, amend the "titles"
-    # dict in script.js to include the script title (returned by title) as a key, and
-    # your description as the value.
+    def process(self, p, *args):
+        """
+        This function is called before processing begins for AlwaysVisible scripts.
+        You can modify the processing object (p) here, inject hooks, etc.
+        """
+
+        pass
+
     def describe(self):
+        """unused"""
        return ""


+current_basedir = paths.script_path
+
+
+def basedir():
+    """returns the base directory for the current script.
For scripts in the main scripts directory, + this is the main directory (where webui.py resides), and for scripts in extensions directory + (ie extensions/aesthetic/script/aesthetic.py), this is extension's directory (extensions/aesthetic) + """ + return current_basedir + + scripts_data = [] +ScriptFile = namedtuple("ScriptFile", ["basedir", "filename", "path"]) +ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir"]) + + +def list_scripts(scriptdirname, extension): + scripts_list = [] + + basedir = os.path.join(paths.script_path, scriptdirname) + if os.path.exists(basedir): + for filename in sorted(os.listdir(basedir)): + scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename))) + + extdir = os.path.join(paths.script_path, "extensions") + if os.path.exists(extdir): + for dirname in sorted(os.listdir(extdir)): + dirpath = os.path.join(extdir, dirname) + if not os.path.isdir(dirpath): + continue + for filename in sorted(os.listdir(os.path.join(dirpath, scriptdirname))): + scripts_list.append(ScriptFile(dirpath, filename, os.path.join(dirpath, scriptdirname, filename))) -def load_scripts(basedir): - if not os.path.exists(basedir): - return + scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] - for filename in sorted(os.listdir(basedir)): - path = os.path.join(basedir, filename) + return scripts_list - if os.path.splitext(path)[1].lower() != '.py': - continue - if not os.path.isfile(path): - continue +def load_scripts(): + global current_basedir + scripts_data.clear() + script_callbacks.clear_callbacks() + + scripts_list = list_scripts("scripts", ".py") + + syspath = sys.path + for scriptfile in sorted(scripts_list): try: - with open(path, "r", encoding="utf8") as file: + if scriptfile.basedir != paths.script_path: + sys.path = [scriptfile.basedir] + sys.path + current_basedir = scriptfile.basedir + + with open(scriptfile.path, "r", encoding="utf8") as file: text = file.read() from types import ModuleType - compiled = compile(text, path, 'exec') - module = ModuleType(filename) + compiled = compile(text, scriptfile.path, 'exec') + module = ModuleType(scriptfile.filename) exec(compiled, module.__dict__) for key, script_class in module.__dict__.items(): if type(script_class) == type and issubclass(script_class, Script): - scripts_data.append((script_class, path)) + scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir)) except Exception: - print(f"Error loading script: {filename}", file=sys.stderr) + print(f"Error loading script: {scriptfile.filename}", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) + finally: + sys.path = syspath + current_basedir = paths.script_path + def wrap_call(func, filename, funcname, *args, default=None, **kwargs): try: @@ -96,56 +163,80 @@ def wrap_call(func, filename, funcname, *args, default=None, **kwargs): class ScriptRunner: def __init__(self): self.scripts = [] + self.selectable_scripts = [] + self.alwayson_scripts = [] self.titles = [] + self.infotext_fields = [] def setup_ui(self, is_img2img): - for script_class, path in scripts_data: + for script_class, path, basedir in scripts_data: script = script_class() script.filename = path - if not script.show(is_img2img): - continue + visibility = script.show(is_img2img) - self.scripts.append(script) + if visibility == AlwaysVisible: + self.scripts.append(script) + self.alwayson_scripts.append(script) + script.alwayson = True - self.titles 
= [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.scripts] + elif visibility: + self.scripts.append(script) + self.selectable_scripts.append(script) - dropdown = gr.Dropdown(label="Script", choices=["None"] + self.titles, value="None", type="index") - dropdown.save_to_config = True - inputs = [dropdown] + self.titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.selectable_scripts] + + inputs = [None] + inputs_alwayson = [True] - for script in self.scripts: + def create_script_ui(script, inputs, inputs_alwayson): script.args_from = len(inputs) script.args_to = len(inputs) controls = wrap_call(script.ui, script.filename, "ui", is_img2img) if controls is None: - continue + return for control in controls: control.custom_script_source = os.path.basename(script.filename) - control.visible = False + if not script.alwayson: + control.visible = False + + if script.infotext_fields is not None: + self.infotext_fields += script.infotext_fields inputs += controls + inputs_alwayson += [script.alwayson for _ in controls] script.args_to = len(inputs) + for script in self.alwayson_scripts: + with gr.Group(): + create_script_ui(script, inputs, inputs_alwayson) + + dropdown = gr.Dropdown(label="Script", choices=["None"] + self.titles, value="None", type="index") + dropdown.save_to_config = True + inputs[0] = dropdown + + for script in self.selectable_scripts: + create_script_ui(script, inputs, inputs_alwayson) + def select_script(script_index): - if 0 < script_index <= len(self.scripts): - script = self.scripts[script_index-1] + if 0 < script_index <= len(self.selectable_scripts): + script = self.selectable_scripts[script_index-1] args_from = script.args_from args_to = script.args_to else: args_from = 0 args_to = 0 - return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))] + return [ui.gr_show(True if i == 0 else args_from <= i < args_to or is_alwayson) for i, is_alwayson in enumerate(inputs_alwayson)] def init_field(title): if title == 'None': return script_index = self.titles.index(title) - script = self.scripts[script_index] + script = self.selectable_scripts[script_index] for i in range(script.args_from, script.args_to): inputs[i].visible = True @@ -164,7 +255,7 @@ class ScriptRunner: if script_index == 0: return None - script = self.scripts[script_index-1] + script = self.selectable_scripts[script_index-1] if script is None: return None @@ -176,6 +267,15 @@ class ScriptRunner: return processed + def run_alwayson_scripts(self, p): + for script in self.alwayson_scripts: + try: + script_args = p.script_args[script.args_from:script.args_to] + script.process(p, *script_args) + except Exception: + print(f"Error running alwayson script: {script.filename}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + def reload_sources(self): for si, script in list(enumerate(self.scripts)): with open(script.filename, "r", encoding="utf8") as file: @@ -197,19 +297,21 @@ class ScriptRunner: self.scripts[si].args_from = args_from self.scripts[si].args_to = args_to + scripts_txt2img = ScriptRunner() scripts_img2img = ScriptRunner() + def reload_script_body_only(): scripts_txt2img.reload_sources() scripts_img2img.reload_sources() -def reload_scripts(basedir): +def reload_scripts(): global scripts_txt2img, scripts_img2img - scripts_data.clear() - load_scripts(basedir) + load_scripts() scripts_txt2img = ScriptRunner() scripts_img2img = ScriptRunner() + diff --git 
a/modules/sd_hijack.py b/modules/sd_hijack.py index 1f8587d1..0f10828e 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -332,7 +332,6 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): multipliers.append([1.0] * 75) z1 = self.process_tokens(tokens, multipliers) - z1 = shared.aesthetic_clip(z1, remade_batch_tokens) z = z1 if z is None else torch.cat((z, z1), axis=-2) remade_batch_tokens = rem_tokens diff --git a/modules/sd_models.py b/modules/sd_models.py index d99dbce8..f9b3063d 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -7,7 +7,7 @@ from omegaconf import OmegaConf from ldm.util import instantiate_from_config -from modules import shared, modelloader, devices +from modules import shared, modelloader, devices, script_callbacks from modules.paths import models_path from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting @@ -238,6 +238,9 @@ def load_model(checkpoint_info=None): sd_hijack.model_hijack.hijack(sd_model) sd_model.eval() + shared.sd_model = sd_model + + script_callbacks.model_loaded_callback(sd_model) print(f"Model loaded.") return sd_model @@ -252,7 +255,7 @@ def reload_model_weights(sd_model, info=None): if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info): checkpoints_loaded.clear() - shared.sd_model = load_model(checkpoint_info) + load_model(checkpoint_info) return shared.sd_model if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: diff --git a/modules/shared.py b/modules/shared.py index 0dbe360d..7d786f07 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -31,7 +31,6 @@ parser.add_argument("--no-half-vae", action='store_true', help="do not switch th parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)") parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI") parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)") -parser.add_argument("--aesthetic_embeddings-dir", type=str, default=os.path.join(models_path, 'aesthetic_embeddings'), help="aesthetic_embeddings directory(default: aesthetic_embeddings)") parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory") parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory") parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui") @@ -109,21 +108,6 @@ os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True) hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir) loaded_hypernetwork = None - -os.makedirs(cmd_opts.aesthetic_embeddings_dir, exist_ok=True) -aesthetic_embeddings = {} - - -def update_aesthetic_embeddings(): - global aesthetic_embeddings - aesthetic_embeddings = {f.replace(".pt", ""): os.path.join(cmd_opts.aesthetic_embeddings_dir, f) for f in - os.listdir(cmd_opts.aesthetic_embeddings_dir) if f.endswith(".pt")} - aesthetic_embeddings = OrderedDict(**{"None": None}, **aesthetic_embeddings) - - -update_aesthetic_embeddings() - - def reload_hypernetworks(): global hypernetworks @@ -415,9 +399,6 @@ sd_model = None 
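Since load_model now assigns shared.sd_model itself and fires model_loaded_callback, an extension can observe checkpoint changes without patching sd_models. A hypothetical sketch using the callback registered in script_callbacks above; the function name and print target are illustrative:

from modules import script_callbacks

def report_model(sd_model):
    # runs every time load_model finishes setting up a checkpoint
    print("model loaded:", getattr(sd_model, "sd_checkpoint_info", None))

script_callbacks.on_model_loaded(report_model)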
clip_model = None -from modules.aesthetic_clip import AestheticCLIP -aesthetic_clip = AestheticCLIP() - progress_print_out = sys.stdout diff --git a/modules/txt2img.py b/modules/txt2img.py index 1761cfa2..c9d5a090 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -7,7 +7,7 @@ import modules.processing as processing from modules.ui import plaintext_to_html -def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, firstphase_width: int, firstphase_height: int, aesthetic_lr=0, aesthetic_weight=0, aesthetic_steps=0, aesthetic_imgs=None, aesthetic_slerp=False, aesthetic_imgs_text="", aesthetic_slerp_angle=0.15, aesthetic_text_negative=False, *args): +def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, firstphase_width: int, firstphase_height: int, *args): p = StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, @@ -36,7 +36,8 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: firstphase_height=firstphase_height if enable_hr else None, ) - shared.aesthetic_clip.set_aesthetic_params(p, float(aesthetic_lr), float(aesthetic_weight), int(aesthetic_steps), aesthetic_imgs, aesthetic_slerp, aesthetic_imgs_text, aesthetic_slerp_angle, aesthetic_text_negative) + p.scripts = modules.scripts.scripts_txt2img + p.script_args = args if cmd_opts.enable_console_prompts: print(f"\ntxt2img: {prompt}", file=shared.progress_print_out) diff --git a/modules/ui.py b/modules/ui.py index 70a9cf10..c977482c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -23,10 +23,10 @@ import gradio as gr import gradio.utils import gradio.routes -from modules import sd_hijack, sd_models, localization +from modules import sd_hijack, sd_models, localization, script_callbacks from modules.paths import script_path -from modules.shared import opts, cmd_opts, restricted_opts, aesthetic_embeddings +from modules.shared import opts, cmd_opts, restricted_opts if cmd_opts.deepdanbooru: from modules.deepbooru import get_deepbooru_tags @@ -44,7 +44,6 @@ from modules.images import save_image import modules.textual_inversion.ui import modules.hypernetworks.ui -import modules.aesthetic_clip as aesthetic_clip import modules.images_history as img_his @@ -662,8 +661,6 @@ def create_ui(wrap_gradio_gpu_call): seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs() - aesthetic_weight, aesthetic_steps, aesthetic_lr, aesthetic_slerp, aesthetic_imgs, aesthetic_imgs_text, aesthetic_slerp_angle, aesthetic_text_negative = aesthetic_clip.create_ui() - with gr.Group(): custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False) @@ -718,14 +715,6 @@ def create_ui(wrap_gradio_gpu_call): denoising_strength, firstphase_width, firstphase_height, - aesthetic_lr, - aesthetic_weight, - 
aesthetic_steps, - aesthetic_imgs, - aesthetic_slerp, - aesthetic_imgs_text, - aesthetic_slerp_angle, - aesthetic_text_negative ] + custom_inputs, outputs=[ @@ -804,14 +793,7 @@ def create_ui(wrap_gradio_gpu_call): (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), (firstphase_width, "First pass size-1"), (firstphase_height, "First pass size-2"), - (aesthetic_lr, "Aesthetic LR"), - (aesthetic_weight, "Aesthetic weight"), - (aesthetic_steps, "Aesthetic steps"), - (aesthetic_imgs, "Aesthetic embedding"), - (aesthetic_slerp, "Aesthetic slerp"), - (aesthetic_imgs_text, "Aesthetic text"), - (aesthetic_text_negative, "Aesthetic text negative"), - (aesthetic_slerp_angle, "Aesthetic slerp angle"), + *modules.scripts.scripts_txt2img.infotext_fields ] txt2img_preview_params = [ @@ -896,8 +878,6 @@ def create_ui(wrap_gradio_gpu_call): seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs() - aesthetic_weight_im, aesthetic_steps_im, aesthetic_lr_im, aesthetic_slerp_im, aesthetic_imgs_im, aesthetic_imgs_text_im, aesthetic_slerp_angle_im, aesthetic_text_negative_im = aesthetic_clip.create_ui() - with gr.Group(): custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True) @@ -988,14 +968,6 @@ def create_ui(wrap_gradio_gpu_call): inpainting_mask_invert, img2img_batch_input_dir, img2img_batch_output_dir, - aesthetic_lr_im, - aesthetic_weight_im, - aesthetic_steps_im, - aesthetic_imgs_im, - aesthetic_slerp_im, - aesthetic_imgs_text_im, - aesthetic_slerp_angle_im, - aesthetic_text_negative_im, ] + custom_inputs, outputs=[ img2img_gallery, @@ -1087,14 +1059,7 @@ def create_ui(wrap_gradio_gpu_call): (seed_resize_from_w, "Seed resize from-1"), (seed_resize_from_h, "Seed resize from-2"), (denoising_strength, "Denoising strength"), - (aesthetic_lr_im, "Aesthetic LR"), - (aesthetic_weight_im, "Aesthetic weight"), - (aesthetic_steps_im, "Aesthetic steps"), - (aesthetic_imgs_im, "Aesthetic embedding"), - (aesthetic_slerp_im, "Aesthetic slerp"), - (aesthetic_imgs_text_im, "Aesthetic text"), - (aesthetic_text_negative_im, "Aesthetic text negative"), - (aesthetic_slerp_angle_im, "Aesthetic slerp angle"), + *modules.scripts.scripts_img2img.infotext_fields ] token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) @@ -1217,9 +1182,9 @@ def create_ui(wrap_gradio_gpu_call): ) #images history images_history_switch_dict = { - "fn":modules.generation_parameters_copypaste.connect_paste, - "t2i":txt2img_paste_fields, - "i2i":img2img_paste_fields + "fn": modules.generation_parameters_copypaste.connect_paste, + "t2i": txt2img_paste_fields, + "i2i": img2img_paste_fields } images_history = img_his.create_history_tabs(gr, opts, cmd_opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) @@ -1264,18 +1229,6 @@ def create_ui(wrap_gradio_gpu_call): with gr.Column(): create_embedding = gr.Button(value="Create embedding", variant='primary') - with gr.Tab(label="Create aesthetic images embedding"): - - new_embedding_name_ae = gr.Textbox(label="Name") - process_src_ae = gr.Textbox(label='Source directory') - batch_ae = gr.Slider(minimum=1, maximum=1024, step=1, label="Batch size", value=256) - with gr.Row(): - with gr.Column(scale=3): - gr.HTML(value="") - - with gr.Column(): - create_embedding_ae = gr.Button(value="Create images embedding", variant='primary') - with gr.Tab(label="Create hypernetwork"): new_hypernetwork_name = gr.Textbox(label="Name") 
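For context on the *.infotext_fields splices above: a script can pair each of its gradio controls with the parameter name it writes into the generation info, and the paste machinery will restore the control's value when that text is re-parsed. A hypothetical always-on script illustrating the pattern; "My setting" is a made-up key, not an existing parameter:

import gradio as gr
from modules import scripts

class ExampleScript(scripts.Script):
    def title(self):
        return "Example"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        strength = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="My setting")
        # (component, infotext key) pairs picked up by txt2img/img2img paste fields
        self.infotext_fields = [(strength, "My setting")]
        return [strength]

    def process(self, p, strength):
        # record the value so it round-trips through the generated image's infotext
        p.extra_generation_params["My setting"] = strength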
new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "320", "640", "1280"])
@@ -1375,21 +1328,6 @@ def create_ui(wrap_gradio_gpu_call):
             ]
         )

-        create_embedding_ae.click(
-            fn=aesthetic_clip.generate_imgs_embd,
-            inputs=[
-                new_embedding_name_ae,
-                process_src_ae,
-                batch_ae
-            ],
-            outputs=[
-                aesthetic_imgs,
-                aesthetic_imgs_im,
-                ti_output,
-                ti_outcome,
-            ]
-        )
-
         create_hypernetwork.click(
             fn=modules.hypernetworks.ui.create_hypernetwork,
             inputs=[
@@ -1580,10 +1518,10 @@ Requested path was: {f}
         if not opts.same_type(value, opts.data_labels[key].default):
             return gr.update(visible=True), opts.dumpjson()

+        oldval = opts.data.get(key, None)
         if cmd_opts.hide_ui_dir_config and key in restricted_opts:
             return gr.update(value=oldval), opts.dumpjson()

-        oldval = opts.data.get(key, None)
         opts.data[key] = value

         if oldval != value:
@@ -1692,9 +1630,12 @@ Requested path was: {f}
         (images_history, "Image Browser", "images_history"),
         (modelmerger_interface, "Checkpoint Merger", "modelmerger"),
         (train_interface, "Train", "ti"),
-        (settings_interface, "Settings", "settings"),
     ]

+    interfaces += script_callbacks.ui_tabs_callback()
+
+    interfaces += [(settings_interface, "Settings", "settings")]
+
     with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file:
         css = file.read()
-- cgit v1.2.1

From 6398dc9b1049f242576ca309f95a3fb1e654951c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 22 Oct 2022 13:34:49 +0300
Subject: further support for extensions

---
 modules/scripts.py | 44 +++++++++++++++++++++++++++++++++++---------
 modules/ui.py      | 19 ++++++++++---------
 2 files changed, 45 insertions(+), 18 deletions(-)
(limited to 'modules')

diff --git a/modules/scripts.py b/modules/scripts.py
index 65f25f49..9323af3e 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -102,17 +102,39 @@ def list_scripts(scriptdirname, extension):
     if os.path.exists(extdir):
         for dirname in sorted(os.listdir(extdir)):
             dirpath = os.path.join(extdir, dirname)
-            if not os.path.isdir(dirpath):
+            scriptdirpath = os.path.join(dirpath, scriptdirname)
+
+            if not os.path.isdir(scriptdirpath):
                 continue

-            for filename in sorted(os.listdir(os.path.join(dirpath, scriptdirname))):
-                scripts_list.append(ScriptFile(dirpath, filename, os.path.join(dirpath, scriptdirname, filename)))
+            for filename in sorted(os.listdir(scriptdirpath)):
+                scripts_list.append(ScriptFile(dirpath, filename, os.path.join(scriptdirpath, filename)))

     scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]

     return scripts_list


+def list_files_with_name(filename):
+    res = []
+
+    dirs = [paths.script_path]
+
+    extdir = os.path.join(paths.script_path, "extensions")
+    if os.path.exists(extdir):
+        dirs += [os.path.join(extdir, d) for d in sorted(os.listdir(extdir))]
+
+    for dirpath in dirs:
+        if not os.path.isdir(dirpath):
+            continue
+
+        path = os.path.join(dirpath, filename)
+        if os.path.isfile(path):
+            res.append(path)
+
+    return res
+
+
 def load_scripts():
     global current_basedir
     scripts_data.clear()
@@ -276,7 +298,7 @@ class ScriptRunner:
                 print(f"Error running alwayson script: {script.filename}", file=sys.stderr)
                 print(traceback.format_exc(), file=sys.stderr)

-    def reload_sources(self):
+    def reload_sources(self, cache):
         for si, script in list(enumerate(self.scripts)):
             with open(script.filename, "r", encoding="utf8") as file:
                 args_from = script.args_from
                 args_to = script.args_to
                 text = file.read()

                 from types
import ModuleType - compiled = compile(text, filename, 'exec') - module = ModuleType(script.filename) - exec(compiled, module.__dict__) + module = cache.get(filename, None) + if module is None: + compiled = compile(text, filename, 'exec') + module = ModuleType(script.filename) + exec(compiled, module.__dict__) + cache[filename] = module for key, script_class in module.__dict__.items(): if type(script_class) == type and issubclass(script_class, Script): @@ -303,8 +328,9 @@ scripts_img2img = ScriptRunner() def reload_script_body_only(): - scripts_txt2img.reload_sources() - scripts_img2img.reload_sources() + cache = {} + scripts_txt2img.reload_sources(cache) + scripts_img2img.reload_sources(cache) def reload_scripts(): diff --git a/modules/ui.py b/modules/ui.py index c977482c..29986124 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1636,13 +1636,15 @@ Requested path was: {f} interfaces += [(settings_interface, "Settings", "settings")] - with open(os.path.join(script_path, "style.css"), "r", encoding="utf8") as file: - css = file.read() + css = "" + + for cssfile in modules.scripts.list_files_with_name("style.css"): + with open(cssfile, "r", encoding="utf8") as file: + css += file.read() + "\n" if os.path.exists(os.path.join(script_path, "user.css")): with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file: - usercss = file.read() - css += usercss + css += file.read() + "\n" if not cmd_opts.no_progressbar_hiding: css += css_hide_progressbar @@ -1865,9 +1867,9 @@ def load_javascript(raw_response): with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile: javascript = f'' - jsdir = os.path.join(script_path, "javascript") - for filename in sorted(os.listdir(jsdir)): - with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile: + scripts_list = modules.scripts.list_scripts("javascript", ".js") + for basedir, filename, path in scripts_list: + with open(path, "r", encoding="utf8") as jsfile: javascript += f"\n" if cmd_opts.theme is not None: @@ -1885,6 +1887,5 @@ def load_javascript(raw_response): gradio.routes.templates.TemplateResponse = template_response -reload_javascript = partial(load_javascript, - gradio.routes.templates.TemplateResponse) +reload_javascript = partial(load_javascript, gradio.routes.templates.TemplateResponse) reload_javascript() -- cgit v1.2.1 From 50b5504401e50b6c94eba41b37fe212b2f27b792 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 22 Oct 2022 14:04:14 +0300 Subject: remove parsing command line from devices.py --- modules/devices.py | 14 +++++--------- modules/lowvram.py | 9 ++++----- 2 files changed, 9 insertions(+), 14 deletions(-) (limited to 'modules') diff --git a/modules/devices.py b/modules/devices.py index 8a159282..dc1f3cdd 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -15,14 +15,10 @@ def extract_device_id(args, name): def get_optimal_device(): if torch.cuda.is_available(): - # CUDA device selection support: - if "shared" not in sys.modules: - commandline_args = os.environ.get('COMMANDLINE_ARGS', "") #re-parse the commandline arguments because using the shared.py module creates an import loop. 
- sys.argv += shlex.split(commandline_args) - device_id = extract_device_id(sys.argv, '--device-id') - else: - device_id = shared.cmd_opts.device_id - + from modules import shared + + device_id = shared.cmd_opts.device_id + if device_id is not None: cuda_device = f"cuda:{device_id}" return torch.device(cuda_device) @@ -49,7 +45,7 @@ def enable_tf32(): errors.run(enable_tf32, "Enabling TF32") -device = device_interrogate = device_gfpgan = device_bsrgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device() +device = device_interrogate = device_gfpgan = device_bsrgan = device_esrgan = device_scunet = device_codeformer = None dtype = torch.float16 dtype_vae = torch.float16 diff --git a/modules/lowvram.py b/modules/lowvram.py index 7eba1349..f327c3df 100644 --- a/modules/lowvram.py +++ b/modules/lowvram.py @@ -1,9 +1,8 @@ import torch -from modules.devices import get_optimal_device +from modules import devices module_in_gpu = None cpu = torch.device("cpu") -device = gpu = get_optimal_device() def send_everything_to_cpu(): @@ -33,7 +32,7 @@ def setup_for_low_vram(sd_model, use_medvram): if module_in_gpu is not None: module_in_gpu.to(cpu) - module.to(gpu) + module.to(devices.device) module_in_gpu = module # see below for register_forward_pre_hook; @@ -51,7 +50,7 @@ def setup_for_low_vram(sd_model, use_medvram): # send the model to GPU. Then put modules back. the modules will be in CPU. stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model = None, None, None - sd_model.to(device) + sd_model.to(devices.device) sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.model = stored # register hooks for those the first two models @@ -70,7 +69,7 @@ def setup_for_low_vram(sd_model, use_medvram): # so that only one of them is in GPU at a time stored = diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = None, None, None, None - sd_model.model.to(device) + sd_model.model.to(devices.device) diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = stored # install hooks for bits of third model -- cgit v1.2.1 From 0e8ca8e7af05be22d7d2c07a47c3c7febe0f0ab6 Mon Sep 17 00:00:00 2001 From: discus0434 Date: Sat, 22 Oct 2022 11:07:00 +0000 Subject: add dropout --- modules/hypernetworks/hypernetwork.py | 68 +++++++++++++++++++++-------------- modules/hypernetworks/ui.py | 10 +++--- modules/ui.py | 43 +++++++++++----------- 3 files changed, 70 insertions(+), 51 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 905cbeef..e493f366 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -1,47 +1,60 @@ +import csv import datetime import glob import html import os import sys import traceback -import tqdm -import csv +import modules.textual_inversion.dataset import torch - -from ldm.util import default -from modules import devices, shared, processing, sd_models -import torch -from torch import einsum +import tqdm from einops import rearrange, repeat -import modules.textual_inversion.dataset +from ldm.util import default +from modules import devices, processing, sd_models, shared from modules.textual_inversion import textual_inversion from 
modules.textual_inversion.learn_schedule import LearnRateScheduler +from torch import einsum class HypernetworkModule(torch.nn.Module): multiplier = 1.0 - activation_dict = {"relu": torch.nn.ReLU, "leakyrelu": torch.nn.LeakyReLU, "elu": torch.nn.ELU, - "swish": torch.nn.Hardswish} - - def __init__(self, dim, state_dict=None, layer_structure=None, add_layer_norm=False, activation_func=None): + activation_dict = { + "relu": torch.nn.ReLU, + "leakyrelu": torch.nn.LeakyReLU, + "elu": torch.nn.ELU, + "swish": torch.nn.Hardswish, + } + + def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False): super().__init__() assert layer_structure is not None, "layer_structure must not be None" assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!" assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!" - + assert activation_func not in self.activation_dict.keys() + "linear", f"Valid activation funcs: 'linear', 'relu', 'leakyrelu', 'elu', 'swish'" + linears = [] for i in range(len(layer_structure) - 1): + + # Add a fully-connected layer linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) - # if skip_first_layer because first parameters potentially contain negative values - # if i < 1: continue - if activation_func in HypernetworkModule.activation_dict: - linears.append(HypernetworkModule.activation_dict[activation_func]()) + + # Add an activation func + if activation_func == "linear": + pass + elif activation_func in self.activation_dict: + linears.append(self.activation_dict[activation_func]()) else: - print("Invalid key {} encountered as activation function!".format(activation_func)) - # if use_dropout: - # linears.append(torch.nn.Dropout(p=0.3)) + raise NotImplementedError( + "Valid activation funcs: 'linear', 'relu', 'leakyrelu', 'elu', 'swish'" + ) + + # Add dropout + if use_dropout: + linears.append(torch.nn.Dropout(p=0.3)) + + # Add layer normalization if add_layer_norm: linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) @@ -93,7 +106,7 @@ class Hypernetwork: filename = None name = None - def __init__(self, name=None, enable_sizes=None, layer_structure=None, add_layer_norm=False, activation_func=None): + def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False): self.filename = None self.name = name self.layers = {} @@ -101,13 +114,14 @@ class Hypernetwork: self.sd_checkpoint = None self.sd_checkpoint_name = None self.layer_structure = layer_structure - self.add_layer_norm = add_layer_norm self.activation_func = activation_func + self.add_layer_norm = add_layer_norm + self.use_dropout = use_dropout for size in enable_sizes or []: self.layers[size] = ( - HypernetworkModule(size, None, self.layer_structure, self.add_layer_norm, self.activation_func), - HypernetworkModule(size, None, self.layer_structure, self.add_layer_norm, self.activation_func), + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), ) def weights(self): @@ -129,8 +143,9 @@ class Hypernetwork: state_dict['step'] = self.step state_dict['name'] = self.name state_dict['layer_structure'] = self.layer_structure - state_dict['is_layer_norm'] = self.add_layer_norm state_dict['activation_func'] = self.activation_func 
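# A self-contained sketch of the checkpoint convention these save()/load()
# hunks follow: each new hyperparameter gets its own key on save and is read
# back with a .get() default, so older .pt files that lack the key still load.
# Function names are illustrative; the key names and defaults mirror the diff.
import torch

def save_meta(path, layer_structure, activation_func, add_layer_norm, use_dropout):
    torch.save({
        'layer_structure': layer_structure,
        'activation_func': activation_func,
        'is_layer_norm': add_layer_norm,
        'use_dropout': use_dropout,
    }, path)

def load_meta(path):
    sd = torch.load(path, map_location='cpu')
    return (sd.get('layer_structure', [1, 2, 1]),
            sd.get('activation_func', None),
            sd.get('is_layer_norm', False),   # older files: assume no layer norm
            sd.get('use_dropout', False))     # older files: assume no dropout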
+ state_dict['is_layer_norm'] = self.add_layer_norm + state_dict['use_dropout'] = self.use_dropout state_dict['sd_checkpoint'] = self.sd_checkpoint state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name @@ -144,8 +159,9 @@ class Hypernetwork: state_dict = torch.load(filename, map_location='cpu') self.layer_structure = state_dict.get('layer_structure', [1, 2, 1]) - self.add_layer_norm = state_dict.get('is_layer_norm', False) self.activation_func = state_dict.get('activation_func', None) + self.add_layer_norm = state_dict.get('is_layer_norm', False) + self.use_dropout = state_dict.get('use_dropout', False) for size, sd in state_dict.items(): if type(size) == int: diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index 1a5a27d8..5f6f17b6 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -3,14 +3,13 @@ import os import re import gradio as gr - -import modules.textual_inversion.textual_inversion import modules.textual_inversion.preprocess -from modules import sd_hijack, shared, devices +import modules.textual_inversion.textual_inversion +from modules import devices, sd_hijack, shared from modules.hypernetworks import hypernetwork -def create_hypernetwork(name, enable_sizes, layer_structure=None, add_layer_norm=False, activation_func=None): +def create_hypernetwork(name, enable_sizes, layer_structure=None, activation_func=None, add_layer_norm=False, use_dropout=False): fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt") assert not os.path.exists(fn), f"file {fn} already exists" @@ -21,8 +20,9 @@ def create_hypernetwork(name, enable_sizes, layer_structure=None, add_layer_norm name=name, enable_sizes=[int(x) for x in enable_sizes], layer_structure=layer_structure, - add_layer_norm=add_layer_norm, activation_func=activation_func, + add_layer_norm=add_layer_norm, + use_dropout=use_dropout, ) hypernet.save(fn) diff --git a/modules/ui.py b/modules/ui.py index 716f14b8..d4b32c05 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -5,43 +5,44 @@ import json import math import mimetypes import os +import platform import random +import subprocess as sp import sys import tempfile import time import traceback -import platform -import subprocess as sp from functools import partial, reduce +import gradio as gr +import gradio.routes +import gradio.utils import numpy as np +import piexif import torch from PIL import Image, PngImagePlugin -import piexif -import gradio as gr -import gradio.utils -import gradio.routes - -from modules import sd_hijack, sd_models, localization +from modules import localization, sd_hijack, sd_models from modules.paths import script_path -from modules.shared import opts, cmd_opts, restricted_opts +from modules.shared import cmd_opts, opts, restricted_opts + if cmd_opts.deepdanbooru: from modules.deepbooru import get_deepbooru_tags -import modules.shared as shared -from modules.sd_samplers import samplers, samplers_for_img2img -from modules.sd_hijack import model_hijack + +import modules.codeformer_model +import modules.generation_parameters_copypaste +import modules.gfpgan_model +import modules.hypernetworks.ui +import modules.images_history as img_his import modules.ldsr_model import modules.scripts -import modules.gfpgan_model -import modules.codeformer_model +import modules.shared as shared import modules.styles -import modules.generation_parameters_copypaste +import modules.textual_inversion.ui from modules import prompt_parser from modules.images import save_image -import modules.textual_inversion.ui -import 
modules.hypernetworks.ui -import modules.images_history as img_his +from modules.sd_hijack import model_hijack +from modules.sd_samplers import samplers, samplers_for_img2img # this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI mimetypes.init() @@ -1223,8 +1224,9 @@ def create_ui(wrap_gradio_gpu_call): new_hypernetwork_name = gr.Textbox(label="Name") new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "320", "640", "1280"]) new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'") + new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork", choices=["linear", "relu", "leakyrelu", "elu", "swish"]) new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization") - new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork", choices=["linear", "relu", "leakyrelu"]) + new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout") with gr.Row(): with gr.Column(scale=3): @@ -1308,8 +1310,9 @@ def create_ui(wrap_gradio_gpu_call): new_hypernetwork_name, new_hypernetwork_sizes, new_hypernetwork_layer_structure, - new_hypernetwork_add_layer_norm, new_hypernetwork_activation_func, + new_hypernetwork_add_layer_norm, + new_hypernetwork_use_dropout ], outputs=[ train_hypernetwork_name, -- cgit v1.2.1 From 1cd3ed7def40198f46d30f74dd37d2906ebdbaa6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 22 Oct 2022 14:28:56 +0300 Subject: fix for extensions without style.css --- modules/ui.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index 29986124..d8d52db1 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1639,6 +1639,9 @@ Requested path was: {f} css = "" for cssfile in modules.scripts.list_files_with_name("style.css"): + if not os.path.isfile(cssfile): + continue + with open(cssfile, "r", encoding="utf8") as file: css += file.read() + "\n" -- cgit v1.2.1 From 7fd90128eb6d1820045bfe2c2c1269661023a712 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 22 Oct 2022 14:48:43 +0300 Subject: added a guard for hypernet training that will stop early if weights are getting no gradients --- modules/hypernetworks/hypernetwork.py | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 47d91ea5..46039a49 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -310,6 +310,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) + steps_without_grad = 0 + pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) for i, entries in pbar: hypernetwork.step = i + ititial_step @@ -332,8 +334,17 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log losses[hypernetwork.step % losses.shape[0]] = loss.item() optimizer.zero_grad() + weights[0].grad = None loss.backward() + + if weights[0].grad is None: + steps_without_grad += 1 + else: + steps_without_grad = 0 + assert steps_without_grad < 10, 'no gradient found for the 
trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' + optimizer.step() + mean_loss = losses.mean() if torch.isnan(mean_loss): raise RuntimeError("Loss diverged.") -- cgit v1.2.1 From fccba4729db341a299db3343e3264fecd9459a07 Mon Sep 17 00:00:00 2001 From: discus0434 Date: Sat, 22 Oct 2022 12:02:41 +0000 Subject: add an option to avoid dying relu --- modules/hypernetworks/hypernetwork.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index b7a04038..3132a56c 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -32,7 +32,6 @@ class HypernetworkModule(torch.nn.Module): assert layer_structure is not None, "layer_structure must not be None" assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!" assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!" - assert activation_func not in self.activation_dict.keys() + "linear", f"Valid activation funcs: 'linear', 'relu', 'leakyrelu', 'elu', 'swish'" linears = [] for i in range(len(layer_structure) - 1): @@ -43,12 +42,13 @@ class HypernetworkModule(torch.nn.Module): # Add an activation func if activation_func == "linear" or activation_func is None: pass + # If ReLU, Skip adding it to the first layer to avoid dying ReLU + elif activation_func == "relu" and i < 1: + pass elif activation_func in self.activation_dict: linears.append(self.activation_dict[activation_func]()) else: - raise RuntimeError( - "Valid activation funcs: 'linear', 'relu', 'leakyrelu', 'elu', 'swish'" - ) + raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}') # Add dropout if use_dropout: @@ -166,8 +166,8 @@ class Hypernetwork: for size, sd in state_dict.items(): if type(size) == int: self.layers[size] = ( - HypernetworkModule(size, sd[0], self.layer_structure, self.add_layer_norm, self.activation_func), - HypernetworkModule(size, sd[1], self.layer_structure, self.add_layer_norm, self.activation_func), + HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), + HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.add_layer_norm, self.use_dropout), ) self.name = state_dict.get('name', self.name) -- cgit v1.2.1 From 7912acef725832debef58c4c7bf8ec22fb446c0b Mon Sep 17 00:00:00 2001 From: discus0434 Date: Sat, 22 Oct 2022 13:00:44 +0000 Subject: small fix --- modules/hypernetworks/hypernetwork.py | 12 +++++------- modules/ui.py | 1 - 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 3132a56c..7d12e0ff 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -42,22 +42,20 @@ class HypernetworkModule(torch.nn.Module): # Add an activation func if activation_func == "linear" or activation_func is None: pass - # If ReLU, Skip adding it to the first layer to avoid dying ReLU - elif activation_func == "relu" and i < 1: - pass elif activation_func in self.activation_dict: linears.append(self.activation_dict[activation_func]()) else: raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}') - # Add dropout - if use_dropout: - linears.append(torch.nn.Dropout(p=0.3)) - # Add layer normalization if add_layer_norm: 
linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) + # Add dropout + if use_dropout: + p = 0.5 if 0 <= i <= len(layer_structure) - 3 else 0.2 + linears.append(torch.nn.Dropout(p=p)) + self.linear = torch.nn.Sequential(*linears) if state_dict is not None: diff --git a/modules/ui.py b/modules/ui.py index cd118552..eca887ca 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1244,7 +1244,6 @@ def create_ui(wrap_gradio_gpu_call): new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization") new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout") overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork") - new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork", choices=["linear", "relu", "leakyrelu"]) with gr.Row(): with gr.Column(scale=3): -- cgit v1.2.1 From 6a4fa73a38935a18779ce1809892730fd1572bee Mon Sep 17 00:00:00 2001 From: discus0434 Date: Sat, 22 Oct 2022 13:44:39 +0000 Subject: small fix --- modules/hypernetworks/hypernetwork.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 3372aae2..3bc71ee5 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -51,10 +51,9 @@ class HypernetworkModule(torch.nn.Module): if add_layer_norm: linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) - # Add dropout - if use_dropout: - p = 0.5 if 0 <= i <= len(layer_structure) - 3 else 0.2 - linears.append(torch.nn.Dropout(p=p)) + # Add dropout expect last layer + if use_dropout and i < len(layer_structure) - 3: + linears.append(torch.nn.Dropout(p=0.3)) self.linear = torch.nn.Sequential(*linears) -- cgit v1.2.1 From d37cfffd537cd29309afbcb192c4f979995c6a34 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 22 Oct 2022 19:18:56 +0300 Subject: added callback for creating new settings in extensions --- modules/script_callbacks.py | 11 +++++++++++ modules/shared.py | 19 +++++++++++++++++-- modules/ui.py | 6 +++++- 3 files changed, 33 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 866b7acd..1270e50f 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -1,6 +1,7 @@ callbacks_model_loaded = [] callbacks_ui_tabs = [] +callbacks_ui_settings = [] def clear_callbacks(): @@ -22,6 +23,11 @@ def ui_tabs_callback(): return res +def ui_settings_callback(): + for callback in callbacks_ui_settings: + callback() + + def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the model is passed as an argument""" @@ -40,3 +46,8 @@ def on_ui_tabs(callback): """ callbacks_ui_tabs.append(callback) + +def on_ui_settings(callback): + """register a function to be called before UI settingsare populated; add your settings + by using shared.opts.add_option(shared.OptionInfo(...)) """ + callbacks_ui_settings.append(callback) diff --git a/modules/shared.py b/modules/shared.py index 5d83971e..d9cb65ef 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -165,13 +165,13 @@ def realesrgan_models_names(): class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, show_on_main_page=False, refresh=None): + def __init__(self, default=None, label="", component=None, component_args=None, 
onchange=None, section=None, refresh=None): self.default = default self.label = label self.component = component self.component_args = component_args self.onchange = onchange - self.section = None + self.section = section self.refresh = refresh @@ -327,6 +327,7 @@ options_templates.update(options_section(('images-history', "Images Browser"), { })) + class Options: data = None data_labels = options_templates @@ -389,6 +390,20 @@ class Options: d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()} return json.dumps(d) + def add_option(self, key, info): + self.data_labels[key] = info + + def reorder(self): + """reorder settings so that all items related to section always go together""" + + section_ids = {} + settings_items = self.data_labels.items() + for k, item in settings_items: + if item.section not in section_ids: + section_ids[item.section] = len(section_ids) + + self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])} + opts = Options() if os.path.exists(config_filename): diff --git a/modules/ui.py b/modules/ui.py index d8d52db1..2849b111 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1461,6 +1461,9 @@ def create_ui(wrap_gradio_gpu_call): components = [] component_dict = {} + script_callbacks.ui_settings_callback() + opts.reorder() + def open_folder(f): if not os.path.exists(f): print(f'Folder "{f}" does not exist. After you create an image, the folder will be created.') @@ -1564,7 +1567,8 @@ Requested path was: {f} previous_section = item.section - gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='
<h1 class="gr-button-lg">{}</h1>'.format(item.section[1])) + elem_id, text = item.section + gr.HTML(elem_id="settings_header_text_{}".format(elem_id), value='<h1 class="gr-button-lg">{}</h1>
'.format(text)) if k in quicksettings_names: quicksettings_list.append((i, k, item)) -- cgit v1.2.1 From dbc8ab65f6d496459a76547776b656c96ad1350d Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 22 Oct 2022 19:19:17 +0300 Subject: typo --- modules/script_callbacks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 1270e50f..5bcccd67 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -48,6 +48,6 @@ def on_ui_tabs(callback): def on_ui_settings(callback): - """register a function to be called before UI settingsare populated; add your settings + """register a function to be called before UI settings are populated; add your settings by using shared.opts.add_option(shared.OptionInfo(...)) """ callbacks_ui_settings.append(callback) -- cgit v1.2.1 From 72383abacdc6a101704a6f73758ce4d0bb68c9d1 Mon Sep 17 00:00:00 2001 From: Greendayle Date: Sat, 22 Oct 2022 16:50:07 +0200 Subject: Deepdanbooru linux fix --- modules/deepbooru.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 8914662d..3c34ab7c 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -50,7 +50,8 @@ def create_deepbooru_process(threshold, deepbooru_opts): the tags. """ from modules import shared # prevents circular reference - shared.deepbooru_process_manager = multiprocessing.Manager() + context = multiprocessing.get_context("spawn") + shared.deepbooru_process_manager = context.Manager() shared.deepbooru_process_queue = shared.deepbooru_process_manager.Queue() shared.deepbooru_process_return = shared.deepbooru_process_manager.dict() shared.deepbooru_process_return["value"] = -1 -- cgit v1.2.1 From e38625011cd4955da4bc67fe95d1d0f4c0c53899 Mon Sep 17 00:00:00 2001 From: Greendayle Date: Sat, 22 Oct 2022 16:56:52 +0200 Subject: fix part2 --- modules/deepbooru.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 3c34ab7c..8bbc90a4 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -55,7 +55,7 @@ def create_deepbooru_process(threshold, deepbooru_opts): shared.deepbooru_process_queue = shared.deepbooru_process_manager.Queue() shared.deepbooru_process_return = shared.deepbooru_process_manager.dict() shared.deepbooru_process_return["value"] = -1 - shared.deepbooru_process = multiprocessing.Process(target=deepbooru_process, args=(shared.deepbooru_process_queue, shared.deepbooru_process_return, threshold, deepbooru_opts)) + shared.deepbooru_process = context.Process(target=deepbooru_process, args=(shared.deepbooru_process_queue, shared.deepbooru_process_return, threshold, deepbooru_opts)) shared.deepbooru_process.start() -- cgit v1.2.1 From 324c7c732dd9afc3d4c397c354797ae5d655b514 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 22 Oct 2022 20:09:37 +0300 Subject: record First pass size as 0x0 for #3328 --- modules/processing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 372489f7..27c669b0 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -524,6 +524,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): else: state.job_count = state.job_count * 2 + self.extra_generation_params["First pass size"] = 
f"{self.firstphase_width}x{self.firstphase_height}" + if self.firstphase_width == 0 or self.firstphase_height == 0: desired_pixel_count = 512 * 512 actual_pixel_count = self.width * self.height @@ -545,7 +547,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): firstphase_width_truncated = self.firstphase_height * self.width / self.height firstphase_height_truncated = self.firstphase_height - self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}" self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f -- cgit v1.2.1 From 0df94d3fcf9d1fc47c4d39039352a3d5b3380c1f Mon Sep 17 00:00:00 2001 From: MrCheeze Date: Sat, 22 Oct 2022 12:59:21 -0400 Subject: fix aesthetic gradients doing nothing after loading a different model --- modules/sd_models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/sd_models.py b/modules/sd_models.py index f9b3063d..49dc3238 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -236,12 +236,11 @@ def load_model(checkpoint_info=None): sd_model.to(shared.device) sd_hijack.model_hijack.hijack(sd_model) + script_callbacks.model_loaded_callback(sd_model) sd_model.eval() shared.sd_model = sd_model - script_callbacks.model_loaded_callback(sd_model) - print(f"Model loaded.") return sd_model @@ -268,6 +267,7 @@ def reload_model_weights(sd_model, info=None): load_model_weights(sd_model, checkpoint_info) sd_hijack.model_hijack.hijack(sd_model) + script_callbacks.model_loaded_callback(sd_model) if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram: sd_model.to(devices.device) -- cgit v1.2.1 From 321bacc6a9eaf4a25f31279f288fa752be507a20 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 22 Oct 2022 20:15:12 +0300 Subject: call model_loaded_callback after setting shared.sd_model in case scripts refer to it using that --- modules/sd_models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/sd_models.py b/modules/sd_models.py index 49dc3238..e697bb72 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -236,11 +236,12 @@ def load_model(checkpoint_info=None): sd_model.to(shared.device) sd_hijack.model_hijack.hijack(sd_model) - script_callbacks.model_loaded_callback(sd_model) sd_model.eval() shared.sd_model = sd_model + script_callbacks.model_loaded_callback(sd_model) + print(f"Model loaded.") return sd_model -- cgit v1.2.1 From 24694e5983d0944b901892cb101878e6dec89a20 Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Sun, 23 Oct 2022 01:57:58 +0900 Subject: Update hypernetwork.py --- modules/hypernetworks/hypernetwork.py | 55 ++++++++++++++++++++++++++++------- 1 file changed, 44 insertions(+), 11 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 3bc71ee5..81132be4 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -16,6 +16,7 @@ from modules.textual_inversion import textual_inversion from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum +from statistics import stdev, mean class HypernetworkModule(torch.nn.Module): multiplier = 1.0 @@ -268,6 +269,32 @@ def stack_conds(conds): return torch.stack(conds) +def log_statistics(loss_info:dict, 
key, value): + if key not in loss_info: + loss_info[key] = [value] + else: + loss_info[key].append(value) + if len(loss_info) > 1024: + loss_info.pop(0) + + +def statistics(data): + total_information = f"loss:{mean(data):.3f}"+u"\u00B1"+f"({stdev(data)/ (len(data)**0.5):.3f})" + recent_data = data[-32:] + recent_information = f"recent 32 loss:{mean(recent_data):.3f}"+u"\u00B1"+f"({stdev(recent_data)/ (len(recent_data)**0.5):.3f})" + return total_information, recent_information + + +def report_statistics(loss_info:dict): + keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x])) + for key in keys: + info, recent = statistics(loss_info[key]) + print("Loss statistics for file " + key) + print(info) + print(recent) + + + def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): # images allows training previews to have infotext. Importing it at the top causes a circular import problem. from modules import images @@ -310,7 +337,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log for weight in weights: weight.requires_grad = True - losses = torch.zeros((32,)) + size = len(ds.indexes) + loss_dict = {} + losses = torch.zeros((size,)) + previous_mean_loss = 0 + print("Mean loss of {} elements".format(size)) last_saved_file = "" last_saved_image = "" @@ -329,7 +360,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) for i, entries in pbar: hypernetwork.step = i + ititial_step - + if loss_dict and i % size == 0: + previous_mean_loss = sum(i[-1] for i in loss_dict.values()) / len(loss_dict) + scheduler.apply(optimizer, hypernetwork.step) if scheduler.finished: break @@ -346,7 +379,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log del c losses[hypernetwork.step % losses.shape[0]] = loss.item() - + for entry in entries: + log_statistics(loss_dict, entry.filename, loss.item()) + optimizer.zero_grad() weights[0].grad = None loss.backward() @@ -359,10 +394,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log optimizer.step() - mean_loss = losses.mean() - if torch.isnan(mean_loss): + if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): raise RuntimeError("Loss diverged.") - pbar.set_description(f"loss: {mean_loss:.7f}") + pbar.set_description(f"dataset loss: {previous_mean_loss:.7f}") if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0: # Before saving, change name to match current checkpoint. @@ -371,7 +405,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log hypernetwork.save(last_saved_file) textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { - "loss": f"{mean_loss:.7f}", + "loss": f"{previous_mean_loss:.7f}", "learn_rate": scheduler.learn_rate }) @@ -420,14 +454,15 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log shared.state.textinfo = f"""
<p>
-Loss: {mean_loss:.7f}<br/>
+Loss: {previous_mean_loss:.7f}<br/>
Step: {hypernetwork.step}<br/>
Last prompt: {html.escape(entries[0].cond_text)}<br/>
Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
""" - + + report_statistics(loss_dict) checkpoint = sd_models.select_checkpoint() hypernetwork.sd_checkpoint = checkpoint.hash @@ -438,5 +473,3 @@ Last saved image: {html.escape(last_saved_image)}
hypernetwork.save(filename) return hypernetwork, filename - - -- cgit v1.2.1 From 4fdb53c1e9962507fc8336dad9a0fabfe6c418c0 Mon Sep 17 00:00:00 2001 From: Unnoen Date: Wed, 19 Oct 2022 21:38:10 +1100 Subject: Generate grid preview for progress image --- modules/sd_samplers.py | 26 +++++++++++++++++++++++++- modules/shared.py | 1 + modules/ui.py | 5 ++++- 3 files changed, 30 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index f58a29b9..74a480e5 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -7,7 +7,7 @@ import inspect import k_diffusion.sampling import ldm.models.diffusion.ddim import ldm.models.diffusion.plms -from modules import prompt_parser, devices, processing +from modules import prompt_parser, devices, processing, images from modules.shared import opts, cmd_opts, state import modules.shared as shared @@ -89,6 +89,30 @@ def sample_to_image(samples): x_sample = x_sample.astype(np.uint8) return Image.fromarray(x_sample) +def samples_to_image_grid(samples): + progress_images = [] + for i in range(len(samples)): + # Decode the samples individually to reduce VRAM usage at the cost of a bit of speed. + x_sample = processing.decode_first_stage(shared.sd_model, samples[i:i+1])[0] + x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0) + x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) + x_sample = x_sample.astype(np.uint8) + progress_images.append(Image.fromarray(x_sample)) + + return images.image_grid(progress_images) + +def samples_to_image_grid_combined(samples): + progress_images = [] + # Decode all samples at once to increase speed at the cost of VRAM usage. + x_samples = processing.decode_first_stage(shared.sd_model, samples) + x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0) + + for x_sample in x_samples: + x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) + x_sample = x_sample.astype(np.uint8) + progress_images.append(Image.fromarray(x_sample)) + + return images.image_grid(progress_images) def store_latent(decoded): state.current_latent = decoded diff --git a/modules/shared.py b/modules/shared.py index d9cb65ef..95d6e225 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -294,6 +294,7 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"), options_templates.update(options_section(('ui', "User interface"), { "show_progressbar": OptionInfo(True, "Show progressbar"), "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}), + "progress_decode_combined": OptionInfo(False, "Decode all progress images at once. 
(Slighty speeds up progress generation but consumes significantly more VRAM with large batches.)"), "return_grid": OptionInfo(True, "Show grid in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), diff --git a/modules/ui.py b/modules/ui.py index 56c233ab..de0abc7e 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -318,7 +318,10 @@ def check_progress_call(id_part): if shared.parallel_processing_allowed: if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None: - shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent) + if opts.progress_decode_combined: + shared.state.current_image = modules.sd_samplers.samples_to_image_grid_combined(shared.state.current_latent) + else: + shared.state.current_image = modules.sd_samplers.samples_to_image_grid(shared.state.current_latent) shared.state.current_image_sampling_step = shared.state.sampling_step image = shared.state.current_image -- cgit v1.2.1 From d213d6ca6f90094cb45c11e2f3cb37d25a8d1f94 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 22 Oct 2022 20:48:13 +0300 Subject: removed the option to use 2x more memory when generating previews added an option to always only show one image in previews removed duplicate code --- modules/sd_samplers.py | 35 ++++++++++------------------------- modules/shared.py | 2 +- modules/ui.py | 6 +++--- 3 files changed, 14 insertions(+), 29 deletions(-) (limited to 'modules') diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 74a480e5..0b408a70 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -71,6 +71,7 @@ sampler_extra_params = { 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], } + def setup_img2img_steps(p, steps=None): if opts.img2img_fix_steps or steps is not None: steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0 @@ -82,37 +83,21 @@ def setup_img2img_steps(p, steps=None): return steps, t_enc -def sample_to_image(samples): - x_sample = processing.decode_first_stage(shared.sd_model, samples[0:1])[0] +def single_sample_to_image(sample): + x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0) x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) return Image.fromarray(x_sample) + +def sample_to_image(samples): + return single_sample_to_image(samples[0]) + + def samples_to_image_grid(samples): - progress_images = [] - for i in range(len(samples)): - # Decode the samples individually to reduce VRAM usage at the cost of a bit of speed. - x_sample = processing.decode_first_stage(shared.sd_model, samples[i:i+1])[0] - x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0) - x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) - x_sample = x_sample.astype(np.uint8) - progress_images.append(Image.fromarray(x_sample)) - - return images.image_grid(progress_images) - -def samples_to_image_grid_combined(samples): - progress_images = [] - # Decode all samples at once to increase speed at the cost of VRAM usage. 
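# A sketch of the per-sample preview path this commit keeps: decoding latents
# one at a time caps VRAM at a single image's worth of activations, trading one
# batched decode for N small ones. decode_fn stands in for the webui's
# processing.decode_first_stage and is assumed to map a (1, C, H, W) latent
# batch to a (1, 3, H', W') image tensor in [-1, 1].
import numpy as np
import torch
from PIL import Image

def latents_to_previews(decode_fn, samples):
    previews = []
    for sample in samples:
        x = decode_fn(sample.unsqueeze(0))[0]            # (3, H', W') in [-1, 1]
        x = torch.clamp((x + 1.0) / 2.0, min=0.0, max=1.0)
        x = (255.0 * np.moveaxis(x.cpu().numpy(), 0, 2)).astype(np.uint8)
        previews.append(Image.fromarray(x))
    return previews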
- x_samples = processing.decode_first_stage(shared.sd_model, samples) - x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0) - - for x_sample in x_samples: - x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) - x_sample = x_sample.astype(np.uint8) - progress_images.append(Image.fromarray(x_sample)) - - return images.image_grid(progress_images) + return images.image_grid([single_sample_to_image(sample) for sample in samples]) + def store_latent(decoded): state.current_latent = decoded diff --git a/modules/shared.py b/modules/shared.py index 95d6e225..25bfc895 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -294,7 +294,7 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"), options_templates.update(options_section(('ui', "User interface"), { "show_progressbar": OptionInfo(True, "Show progressbar"), "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}), - "progress_decode_combined": OptionInfo(False, "Decode all progress images at once. (Slighty speeds up progress generation but consumes significantly more VRAM with large batches.)"), + "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), "return_grid": OptionInfo(True, "Show grid in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), diff --git a/modules/ui.py b/modules/ui.py index de0abc7e..ffa14cac 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -318,10 +318,10 @@ def check_progress_call(id_part): if shared.parallel_processing_allowed: if shared.state.sampling_step - shared.state.current_image_sampling_step >= opts.show_progress_every_n_steps and shared.state.current_latent is not None: - if opts.progress_decode_combined: - shared.state.current_image = modules.sd_samplers.samples_to_image_grid_combined(shared.state.current_latent) - else: + if opts.show_progress_grid: shared.state.current_image = modules.sd_samplers.samples_to_image_grid(shared.state.current_latent) + else: + shared.state.current_image = modules.sd_samplers.sample_to_image(shared.state.current_latent) shared.state.current_image_sampling_step = shared.state.sampling_step image = shared.state.current_image -- cgit v1.2.1 From be748e8b086bd9834d08bdd9160649a5e7700af7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 22 Oct 2022 22:05:22 +0300 Subject: add --freeze-settings commandline argument to disable changing settings --- modules/shared.py | 1 + modules/ui.py | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 25bfc895..b55371d3 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -64,6 +64,7 @@ parser.add_argument("--port", type=int, help="launch gradio with given server po parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False) parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json')) parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False) +parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False) 
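# The ui.py hunks of this commit enforce the new flag twice, once on each write
# path; a toy sketch of the double guard (names condensed from the diff, with a
# plain dict standing in for the real options object):
settings = {}

def apply_setting(key, value, frozen=False):
    if frozen or value is None:   # quicksettings path: silently ignore the change
        return None
    settings[key] = value
    return value

def run_settings(updates, frozen=False):
    assert not frozen, "changing settings is disabled"  # settings page: fail loudly
    changed = 0
    for key, value in updates.items():
        if settings.get(key) != value:
            settings[key] = value
            changed += 1
    return f"{changed} settings changed."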
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json')) parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option") parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None) diff --git a/modules/ui.py b/modules/ui.py index ffa14cac..2311572c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -580,6 +580,9 @@ def apply_setting(key, value): if value is None: return gr.update() + if shared.cmd_opts.freeze_settings: + return gr.update() + # dont allow model to be swapped when model hash exists in prompt if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: return gr.update() @@ -1501,6 +1504,8 @@ Requested path was: {f} def run_settings(*args): changed = 0 + assert not shared.cmd_opts.freeze_settings, "changing settings is disabled" + for key, value, comp in zip(opts.data_labels.keys(), args, components): if comp != dummy_component and not opts.same_type(value, opts.data_labels[key].default): return f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}", opts.dumpjson() @@ -1530,6 +1535,8 @@ Requested path was: {f} return f'{changed} settings changed.', opts.dumpjson() def run_settings_single(value, key): + assert not shared.cmd_opts.freeze_settings, "changing settings is disabled" + if not opts.same_type(value, opts.data_labels[key].default): return gr.update(visible=True), opts.dumpjson() @@ -1582,7 +1589,7 @@ Requested path was: {f} elem_id, text = item.section gr.HTML(elem_id="settings_header_text_{}".format(elem_id), value='
<h1 class="gr-button-lg">{}</h1>
'.format(text)) - if k in quicksettings_names: + if k in quicksettings_names and not shared.cmd_opts.freeze_settings: quicksettings_list.append((i, k, item)) components.append(dummy_component) else: @@ -1615,7 +1622,7 @@ Requested path was: {f} def reload_scripts(): modules.scripts.reload_script_body_only() - reload_javascript() # need to refresh the html page + reload_javascript() # need to refresh the html page reload_script_bodies.click( fn=reload_scripts, -- cgit v1.2.1 From ca5a9e79dc28eeaa3a161427a82e34703bf15765 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 22 Oct 2022 22:06:54 +0300 Subject: fix for img2img color correction in a batch #3218 --- modules/processing.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 27c669b0..b1877b80 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -403,8 +403,6 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if (len(prompts) == 0): break - #uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt]) - #c = p.sd_model.get_learned_conditioning(prompts) with devices.autocast(): uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps) c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps) @@ -716,6 +714,10 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0) if self.overlay_images is not None: self.overlay_images = self.overlay_images * self.batch_size + + if self.color_corrections is not None and len(self.color_corrections) == 1: + self.color_corrections = self.color_corrections * self.batch_size + elif len(imgs) <= self.batch_size: self.batch_size = len(imgs) batch_images = np.array(imgs) -- cgit v1.2.1 From 48dbf99e84045ee7af55bc5b1b86492a240e631e Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Sun, 23 Oct 2022 04:17:16 +0900 Subject: Allow tracking real-time loss Someone had 6000 images in their dataset, and it was shown as 0, which was confusing. This will allow tracking real time dataset-average loss for registered objects. 
--- modules/hypernetworks/hypernetwork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 81132be4..99fd0f8f 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -360,7 +360,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) for i, entries in pbar: hypernetwork.step = i + ititial_step - if loss_dict and i % size == 0: + if len(loss_dict) > 0: previous_mean_loss = sum(i[-1] for i in loss_dict.values()) / len(loss_dict) scheduler.apply(optimizer, hypernetwork.step) -- cgit v1.2.1 From 1b4d04737ac513cbd55958bb60a4f85166f3484b Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sat, 22 Oct 2022 20:13:16 -0300 Subject: Remove unused imports --- modules/api/api.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 5b0c934e..a5136b4b 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,11 +1,9 @@ from modules.api.processing import StableDiffusionProcessingAPI from modules.processing import StableDiffusionProcessingTxt2Img, process_images from modules.sd_samplers import all_samplers -from modules.extras import run_pnginfo import modules.shared as shared import uvicorn -from fastapi import Body, APIRouter, HTTPException -from fastapi.responses import JSONResponse +from fastapi import APIRouter, HTTPException from pydantic import BaseModel, Field, Json import json import io @@ -18,7 +16,6 @@ class TextToImageResponse(BaseModel): parameters: Json info: Json - class Api: def __init__(self, app, queue_lock): self.router = APIRouter() -- cgit v1.2.1 From b02926df1393df311db734af149fb9faf4389cbe Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sat, 22 Oct 2022 20:24:04 -0300 Subject: Moved moodels to their own file and extracted base64 conversion to its own function --- modules/api/api.py | 17 ++++++----------- modules/api/models.py | 8 ++++++++ 2 files changed, 14 insertions(+), 11 deletions(-) create mode 100644 modules/api/models.py (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index a5136b4b..c17d7580 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -4,17 +4,17 @@ from modules.sd_samplers import all_samplers import modules.shared as shared import uvicorn from fastapi import APIRouter, HTTPException -from pydantic import BaseModel, Field, Json import json import io import base64 +from modules.api.models import * sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) -class TextToImageResponse(BaseModel): - images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") - parameters: Json - info: Json +def img_to_base64(img): + buffer = io.BytesIO() + img.save(buffer, format="png") + return base64.b64encode(buffer.getvalue()) class Api: def __init__(self, app, queue_lock): @@ -41,15 +41,10 @@ class Api: with self.queue_lock: processed = process_images(p) - b64images = [] - for i in processed.images: - buffer = io.BytesIO() - i.save(buffer, format="png") - b64images.append(base64.b64encode(buffer.getvalue())) + b64images = list(map(img_to_base64, processed.images)) return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=json.dumps(processed.info)) - def 
img2imgapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py new file mode 100644 index 00000000..a7d247d8 --- /dev/null +++ b/modules/api/models.py @@ -0,0 +1,8 @@ +from pydantic import BaseModel, Field, Json + +class TextToImageResponse(BaseModel): + images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") + parameters: Json + info: Json + + \ No newline at end of file -- cgit v1.2.1 From 28e26c2bef217ae82eb9e980cceb3f67ef22e109 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sat, 22 Oct 2022 23:13:32 -0300 Subject: Add "extra" single image operation - Separate extra modes into 3 endpoints so the user ddoesn't ahve to handle so many unused parameters. - Add response model for codumentation --- modules/api/api.py | 43 ++++++++++++++++++++++++++++++++++++++----- modules/api/models.py | 26 +++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index c17d7580..3b804373 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -8,20 +8,42 @@ import json import io import base64 from modules.api.models import * +from PIL import Image +from modules.extras import run_extras + +def upscaler_to_index(name: str): + try: + return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) + except: + raise HTTPException(status_code=400, detail="Upscaler not found") sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) -def img_to_base64(img): +def img_to_base64(img: str): buffer = io.BytesIO() img.save(buffer, format="png") return base64.b64encode(buffer.getvalue()) +def base64_to_bytes(base64Img: str): + if "," in base64Img: + base64Img = base64Img.split(",")[1] + return io.BytesIO(base64.b64decode(base64Img)) + +def base64_to_images(base64Imgs: list[str]): + imgs = [] + for img in base64Imgs: + img = Image.open(base64_to_bytes(img)) + imgs.append(img) + return imgs + + class Api: def __init__(self, app, queue_lock): self.router = APIRouter() self.app = app self.queue_lock = queue_lock - self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"]) + self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) + self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) def text2imgapi(self, txt2imgreq: StableDiffusionProcessingAPI ): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -45,12 +67,23 @@ class Api: return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=json.dumps(processed.info)) - def img2imgapi(self): raise NotImplementedError - def extrasapi(self): - raise NotImplementedError + def extras_single_image_api(self, req: ExtrasSingleImageRequest): + upscaler1Index = upscaler_to_index(req.upscaler_1) + upscaler2Index = upscaler_to_index(req.upscaler_2) + + reqDict = vars(req) + reqDict.pop('upscaler_1') + reqDict.pop('upscaler_2') + + reqDict['image'] = base64_to_images([reqDict['image']])[0] + + with self.queue_lock: + result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=0, image_folder="", input_dir="", output_dir="") + + return ExtrasSingleImageResponse(image="data:image/png;base64,"+img_to_base64(result[0]), html_info_x=result[1], html_info=result[2]) def 
pnginfoapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index a7d247d8..dcf1ab54 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,8 +1,32 @@ from pydantic import BaseModel, Field, Json +from typing_extensions import Literal +from modules.shared import sd_upscalers class TextToImageResponse(BaseModel): images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") parameters: Json info: Json - \ No newline at end of file +class ExtrasBaseRequest(BaseModel): + resize_mode: Literal[0, 1] = Field(default=0, title="Resize Mode", description="Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w.") + show_extras_results: bool = Field(default=True, title="Show results", description="Should the backend return the generated image?") + gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.") + codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.") + codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.") + upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=4, description="By how much to upscale the image, only used when resize_mode=0.") + upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.") + upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. 
Only used when resize_mode=1.") + upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the choosen size?") + upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") + upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}") + extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.") + +class ExtraBaseResponse(BaseModel): + html_info_x: str + html_info: str + +class ExtrasSingleImageRequest(ExtrasBaseRequest): + image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.") + +class ExtrasSingleImageResponse(ExtraBaseResponse): + image: str = Field(default=None, title="Image", description="The generated image in base64 format.") \ No newline at end of file -- cgit v1.2.1 From 1fbfc052eb529d8cf8ce5baf578bcf93d0280c29 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 23 Oct 2022 05:43:34 +0100 Subject: Update hypernetwork.py --- modules/hypernetworks/hypernetwork.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 99fd0f8f..98a7b62e 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -288,10 +288,13 @@ def statistics(data): def report_statistics(loss_info:dict): keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x])) for key in keys: - info, recent = statistics(loss_info[key]) - print("Loss statistics for file " + key) - print(info) - print(recent) + try: + print("Loss statistics for file " + key) + info, recent = statistics(loss_info[key]) + print(info) + print(recent) + except Exception as e: + print(e) -- cgit v1.2.1 From a7c213d0f5ebb10722629b8490a5863f9ce6c4fa Mon Sep 17 00:00:00 2001 From: Stephen Date: Fri, 21 Oct 2022 19:27:40 -0400 Subject: [API][Feature] - Add img2img API endpoint --- modules/api/api.py | 58 +++++++++++++++++++++++++++++++++++++++++++---- modules/api/processing.py | 11 +++++++-- modules/processing.py | 2 +- 3 files changed, 63 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 5b0c934e..a04f2428 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,5 +1,5 @@ -from modules.api.processing import StableDiffusionProcessingAPI -from modules.processing import StableDiffusionProcessingTxt2Img, process_images +from modules.api.processing import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI +from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.sd_samplers import all_samplers from modules.extras import run_pnginfo import modules.shared as shared @@ -10,6 +10,7 @@ from pydantic import BaseModel, Field, Json import json import io import base64 +from PIL import Image sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) @@ 
-18,6 +19,11 @@ class TextToImageResponse(BaseModel): parameters: Json info: Json +class ImageToImageResponse(BaseModel): + images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") + parameters: Json + info: Json + class Api: def __init__(self, app, queue_lock): @@ -25,8 +31,9 @@ class Api: self.app = app self.queue_lock = queue_lock self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"]) + self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"]) - def text2imgapi(self, txt2imgreq: StableDiffusionProcessingAPI ): + def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) if sampler_index is None: @@ -54,8 +61,49 @@ class Api: - def img2imgapi(self): - raise NotImplementedError + def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI): + sampler_index = sampler_to_index(img2imgreq.sampler_index) + + if sampler_index is None: + raise HTTPException(status_code=404, detail="Sampler not found") + + + init_images = img2imgreq.init_images + if init_images is None: + raise HTTPException(status_code=404, detail="Init image not found") + + + populate = img2imgreq.copy(update={ # Override __init__ params + "sd_model": shared.sd_model, + "sampler_index": sampler_index[0], + "do_not_save_samples": True, + "do_not_save_grid": True + } + ) + p = StableDiffusionProcessingImg2Img(**vars(populate)) + + imgs = [] + for img in init_images: + # if has a comma, deal with prefix + if "," in img: + img = img.split(",")[1] + # convert base64 to PIL image + img = base64.b64decode(img) + img = Image.open(io.BytesIO(img)) + imgs = [img] * p.batch_size + + p.init_images = imgs + # Override object param + with self.queue_lock: + processed = process_images(p) + + b64images = [] + for i in processed.images: + buffer = io.BytesIO() + i.save(buffer, format="png") + b64images.append(base64.b64encode(buffer.getvalue())) + + return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=json.dumps(processed.info)) def extrasapi(self): raise NotImplementedError diff --git a/modules/api/processing.py b/modules/api/processing.py index 4c541241..9f1d65c0 100644 --- a/modules/api/processing.py +++ b/modules/api/processing.py @@ -1,7 +1,8 @@ +from array import array from inflection import underscore from typing import Any, Dict, Optional from pydantic import BaseModel, Field, create_model -from modules.processing import StableDiffusionProcessingTxt2Img +from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img import inspect @@ -92,8 +93,14 @@ class PydanticModelGenerator: DynamicModel.__config__.allow_mutation = True return DynamicModel -StableDiffusionProcessingAPI = PydanticModelGenerator( +StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( "StableDiffusionProcessingTxt2Img", StableDiffusionProcessingTxt2Img, [{"key": "sampler_index", "type": str, "default": "Euler"}] +).generate_model() + +StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( + "StableDiffusionProcessingImg2Img", + StableDiffusionProcessingImg2Img, + [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}] ).generate_model() \ No newline at end of file diff --git a/modules/processing.py b/modules/processing.py index b1877b80..1557ed8c 100644 --- a/modules/processing.py +++ 
b/modules/processing.py @@ -623,7 +623,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): sampler = None - def __init__(self, init_images=None, resize_mode=0, denoising_strength=0.75, mask=None, mask_blur=4, inpainting_fill=0, inpaint_full_res=True, inpaint_full_res_padding=0, inpainting_mask_invert=0, **kwargs): + def __init__(self, init_images: list=None, resize_mode: int=0, denoising_strength: float=0.75, mask: str=None, mask_blur: int=4, inpainting_fill: int=0, inpaint_full_res: bool=True, inpaint_full_res_padding: int=0, inpainting_mask_invert: int=0, **kwargs): super().__init__(**kwargs) self.init_images = init_images -- cgit v1.2.1 From 9e1a8b7734a2881451a2efbf80def011ea41ba49 Mon Sep 17 00:00:00 2001 From: Stephen Date: Sat, 22 Oct 2022 15:42:00 -0400 Subject: non-implemented mask with any type --- modules/api/api.py | 4 ++++ modules/api/processing.py | 2 +- modules/processing.py | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index a04f2428..3df6ff96 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -72,6 +72,10 @@ class Api: if init_images is None: raise HTTPException(status_code=404, detail="Init image not found") + mask = img2imgreq.mask + if mask: + raise HTTPException(status_code=400, detail="Mask not supported yet") + populate = img2imgreq.copy(update={ # Override __init__ params "sd_model": shared.sd_model, diff --git a/modules/api/processing.py b/modules/api/processing.py index 9f1d65c0..f551fa35 100644 --- a/modules/api/processing.py +++ b/modules/api/processing.py @@ -102,5 +102,5 @@ StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( "StableDiffusionProcessingImg2Img", StableDiffusionProcessingImg2Img, - [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}] + [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}] ).generate_model() \ No newline at end of file diff --git a/modules/processing.py b/modules/processing.py index 1557ed8c..ff83023c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -623,7 +623,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): sampler = None - def __init__(self, init_images: list=None, resize_mode: int=0, denoising_strength: float=0.75, mask: str=None, mask_blur: int=4, inpainting_fill: int=0, inpaint_full_res: bool=True, inpaint_full_res_padding: int=0, inpainting_mask_invert: int=0, **kwargs): + def __init__(self, init_images: list=None, resize_mode: int=0, denoising_strength: float=0.75, mask: Any=None, mask_blur: int=4, inpainting_fill: int=0, inpaint_full_res: bool=True, inpaint_full_res_padding: int=0, inpainting_mask_invert: int=0, **kwargs): super().__init__(**kwargs) self.init_images = init_images -- cgit v1.2.1 From 5dc0739ecdc1ade8fcf4eb77f2a503ef12489f32 Mon Sep 17 00:00:00 2001 From: Stephen Date: Sat, 22 Oct 2022 17:10:28 -0400 Subject: working mask --- modules/api/api.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'modules') diff --git 
a/modules/api/api.py b/modules/api/api.py index 3df6ff96..3caa83a4 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -33,6 +33,14 @@ class Api: self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"]) self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"]) + def __base64_to_image(self, base64_string): + # if has a comma, deal with prefix + if "," in base64_string: + base64_string = base64_string.split(",")[1] + imgdata = base64.b64decode(base64_string) + # convert base64 to PIL image + return Image.open(io.BytesIO(imgdata)) + def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -74,26 +82,22 @@ class Api: mask = img2imgreq.mask if mask: - raise HTTPException(status_code=400, detail="Mask not supported yet") + mask = self.__base64_to_image(mask) populate = img2imgreq.copy(update={ # Override __init__ params "sd_model": shared.sd_model, "sampler_index": sampler_index[0], "do_not_save_samples": True, - "do_not_save_grid": True + "do_not_save_grid": True, + "mask": mask } ) p = StableDiffusionProcessingImg2Img(**vars(populate)) imgs = [] for img in init_images: - # if has a comma, deal with prefix - if "," in img: - img = img.split(",")[1] - # convert base64 to PIL image - img = base64.b64decode(img) - img = Image.open(io.BytesIO(img)) + img = self.__base64_to_image(img) imgs = [img] * p.batch_size p.init_images = imgs -- cgit v1.2.1 From 1be5933ba21a3badec42b7b2753d626f849b609d Mon Sep 17 00:00:00 2001 From: captin411 Date: Sun, 23 Oct 2022 04:11:07 -0700 Subject: auto cropping now works with non square crops --- modules/textual_inversion/autocrop.py | 509 ++++++++++++++++++---------------- 1 file changed, 269 insertions(+), 240 deletions(-) (limited to 'modules') diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index 5a551c25..b2f9241c 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -1,241 +1,270 @@ -import cv2 -from collections import defaultdict -from math import log, sqrt -import numpy as np -from PIL import Image, ImageDraw - -GREEN = "#0F0" -BLUE = "#00F" -RED = "#F00" - - -def crop_image(im, settings): - """ Intelligently crop an image to the subject matter """ - if im.height > im.width: - im = im.resize((settings.crop_width, settings.crop_height * im.height // im.width)) - elif im.width > im.height: - im = im.resize((settings.crop_width * im.width // im.height, settings.crop_height)) - else: - im = im.resize((settings.crop_width, settings.crop_height)) - - if im.height == im.width: - return im - - focus = focal_point(im, settings) - - # take the focal point and turn it into crop coordinates that try to center over the focal - # point but then get adjusted back into the frame - y_half = int(settings.crop_height / 2) - x_half = int(settings.crop_width / 2) - - x1 = focus.x - x_half - if x1 < 0: - x1 = 0 - elif x1 + settings.crop_width > im.width: - x1 = im.width - settings.crop_width - - y1 = focus.y - y_half - if y1 < 0: - y1 = 0 - elif y1 + settings.crop_height > im.height: - y1 = im.height - settings.crop_height - - x2 = x1 + settings.crop_width - y2 = y1 + settings.crop_height - - crop = [x1, y1, x2, y2] - - if settings.annotate_image: - d = ImageDraw.Draw(im) - rect = list(crop) - rect[2] -= 1 - rect[3] -= 1 - d.rectangle(rect, outline=GREEN) - if settings.destop_view_image: - im.show() - - return im.crop(tuple(crop)) - -def focal_point(im, settings): - 
corner_points = image_corner_points(im, settings) - entropy_points = image_entropy_points(im, settings) - face_points = image_face_points(im, settings) - - total_points = len(corner_points) + len(entropy_points) + len(face_points) - - corner_weight = settings.corner_points_weight - entropy_weight = settings.entropy_points_weight - face_weight = settings.face_points_weight - - weight_pref_total = corner_weight + entropy_weight + face_weight - - # weight things - pois = [] - if weight_pref_total == 0 or total_points == 0: - return pois - - pois.extend( - [ PointOfInterest( p.x, p.y, weight=p.weight * ( (corner_weight/weight_pref_total) / (len(corner_points)/total_points) )) for p in corner_points ] - ) - pois.extend( - [ PointOfInterest( p.x, p.y, weight=p.weight * ( (entropy_weight/weight_pref_total) / (len(entropy_points)/total_points) )) for p in entropy_points ] - ) - pois.extend( - [ PointOfInterest( p.x, p.y, weight=p.weight * ( (face_weight/weight_pref_total) / (len(face_points)/total_points) )) for p in face_points ] - ) - - average_point = poi_average(pois, settings) - - if settings.annotate_image: - d = ImageDraw.Draw(im) - for f in face_points: - d.rectangle(f.bounding(f.size), outline=RED) - for f in entropy_points: - d.rectangle(f.bounding(30), outline=BLUE) - for poi in pois: - w = max(4, 4 * 0.5 * sqrt(poi.weight)) - d.ellipse(poi.bounding(w), fill=BLUE) - d.ellipse(average_point.bounding(25), outline=GREEN) - - return average_point - - -def image_face_points(im, settings): - np_im = np.array(im) - gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY) - - tries = [ - [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ], - [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ], - [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ] - ] - - for t in tries: - # print(t[0]) - classifier = cv2.CascadeClassifier(t[0]) - minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side - try: - faces = classifier.detectMultiScale(gray, scaleFactor=1.1, - minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE) - except: - continue - - if len(faces) > 0: - rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces] - return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2])) for r in rects] - return [] - - -def image_corner_points(im, settings): - grayscale = im.convert("L") - - # naive attempt at preventing focal points from collecting at watermarks near the bottom - gd = ImageDraw.Draw(grayscale) - gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999") - - np_im = np.array(grayscale) - - points = cv2.goodFeaturesToTrack( - np_im, - maxCorners=100, - qualityLevel=0.04, - minDistance=min(grayscale.width, grayscale.height)*0.07, - useHarrisDetector=False, - ) - - if points is None: - return [] - - focal_points = [] - for point in points: - x, y = point.ravel() - focal_points.append(PointOfInterest(x, y, size=4)) - - return focal_points - - -def image_entropy_points(im, settings): - landscape = im.height < im.width - portrait = im.height > im.width - if landscape: - move_idx = [0, 2] - move_max = im.size[0] - elif portrait: - 
move_idx = [1, 3] - move_max = im.size[1] - else: - return [] - - e_max = 0 - crop_current = [0, 0, settings.crop_width, settings.crop_height] - crop_best = crop_current - while crop_current[move_idx[1]] < move_max: - crop = im.crop(tuple(crop_current)) - e = image_entropy(crop) - - if (e > e_max): - e_max = e - crop_best = list(crop_current) - - crop_current[move_idx[0]] += 4 - crop_current[move_idx[1]] += 4 - - x_mid = int(crop_best[0] + settings.crop_width/2) - y_mid = int(crop_best[1] + settings.crop_height/2) - - return [PointOfInterest(x_mid, y_mid, size=25)] - - -def image_entropy(im): - # greyscale image entropy - # band = np.asarray(im.convert("L")) - band = np.asarray(im.convert("1"), dtype=np.uint8) - hist, _ = np.histogram(band, bins=range(0, 256)) - hist = hist[hist > 0] - return -np.log2(hist / hist.sum()).sum() - - -def poi_average(pois, settings): - weight = 0.0 - x = 0.0 - y = 0.0 - for poi in pois: - weight += poi.weight - x += poi.x * poi.weight - y += poi.y * poi.weight - avg_x = round(x / weight) - avg_y = round(y / weight) - - return PointOfInterest(avg_x, avg_y) - - -class PointOfInterest: - def __init__(self, x, y, weight=1.0, size=10): - self.x = x - self.y = y - self.weight = weight - self.size = size - - def bounding(self, size): - return [ - self.x - size//2, - self.y - size//2, - self.x + size//2, - self.y + size//2 - ] - - -class Settings: - def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False): - self.crop_width = crop_width - self.crop_height = crop_height - self.corner_points_weight = corner_points_weight - self.entropy_points_weight = entropy_points_weight - self.face_points_weight = entropy_points_weight - self.annotate_image = annotate_image +import cv2 +from collections import defaultdict +from math import log, sqrt +import numpy as np +from PIL import Image, ImageDraw + +GREEN = "#0F0" +BLUE = "#00F" +RED = "#F00" + + +def crop_image(im, settings): + """ Intelligently crop an image to the subject matter """ + + scale_by = 1 + if is_landscape(im.width, im.height): + scale_by = settings.crop_height / im.height + elif is_portrait(im.width, im.height): + scale_by = settings.crop_width / im.width + elif is_square(im.width, im.height): + if is_square(settings.crop_width, settings.crop_height): + scale_by = settings.crop_width / im.width + elif is_landscape(settings.crop_width, settings.crop_height): + scale_by = settings.crop_width / im.width + elif is_portrait(settings.crop_width, settings.crop_height): + scale_by = settings.crop_height / im.height + + im = im.resize((int(im.width * scale_by), int(im.height * scale_by))) + + if im.width == settings.crop_width and im.height == settings.crop_height: + if settings.annotate_image: + d = ImageDraw.Draw(im) + rect = [0, 0, im.width, im.height] + rect[2] -= 1 + rect[3] -= 1 + d.rectangle(rect, outline=GREEN) + if settings.destop_view_image: + im.show() + return im + + focus = focal_point(im, settings) + + # take the focal point and turn it into crop coordinates that try to center over the focal + # point but then get adjusted back into the frame + y_half = int(settings.crop_height / 2) + x_half = int(settings.crop_width / 2) + + x1 = focus.x - x_half + if x1 < 0: + x1 = 0 + elif x1 + settings.crop_width > im.width: + x1 = im.width - settings.crop_width + + y1 = focus.y - y_half + if y1 < 0: + y1 = 0 + elif y1 + settings.crop_height > im.height: + y1 = im.height - settings.crop_height + + x2 = x1 + settings.crop_width + 
y2 = y1 + settings.crop_height + + crop = [x1, y1, x2, y2] + + if settings.annotate_image: + d = ImageDraw.Draw(im) + rect = list(crop) + rect[2] -= 1 + rect[3] -= 1 + d.rectangle(rect, outline=GREEN) + if settings.destop_view_image: + im.show() + + return im.crop(tuple(crop)) + +def focal_point(im, settings): + corner_points = image_corner_points(im, settings) + entropy_points = image_entropy_points(im, settings) + face_points = image_face_points(im, settings) + + total_points = len(corner_points) + len(entropy_points) + len(face_points) + + corner_weight = settings.corner_points_weight + entropy_weight = settings.entropy_points_weight + face_weight = settings.face_points_weight + + weight_pref_total = corner_weight + entropy_weight + face_weight + + # weight things + pois = [] + if weight_pref_total == 0 or total_points == 0: + return pois + + pois.extend( + [ PointOfInterest( p.x, p.y, weight=p.weight * ( (corner_weight/weight_pref_total) / (len(corner_points)/total_points) )) for p in corner_points ] + ) + pois.extend( + [ PointOfInterest( p.x, p.y, weight=p.weight * ( (entropy_weight/weight_pref_total) / (len(entropy_points)/total_points) )) for p in entropy_points ] + ) + pois.extend( + [ PointOfInterest( p.x, p.y, weight=p.weight * ( (face_weight/weight_pref_total) / (len(face_points)/total_points) )) for p in face_points ] + ) + + average_point = poi_average(pois, settings) + + if settings.annotate_image: + d = ImageDraw.Draw(im) + for f in face_points: + d.rectangle(f.bounding(f.size), outline=RED) + for f in entropy_points: + d.rectangle(f.bounding(30), outline=BLUE) + for poi in pois: + w = max(4, 4 * 0.5 * sqrt(poi.weight)) + d.ellipse(poi.bounding(w), fill=BLUE) + d.ellipse(average_point.bounding(25), outline=GREEN) + + return average_point + + +def image_face_points(im, settings): + np_im = np.array(im) + gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY) + + tries = [ + [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ], + [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ], + [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ], + [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ], + [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ], + [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ], + [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ], + [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ] + ] + + for t in tries: + # print(t[0]) + classifier = cv2.CascadeClassifier(t[0]) + minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side + try: + faces = classifier.detectMultiScale(gray, scaleFactor=1.1, + minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE) + except: + continue + + if len(faces) > 0: + rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces] + return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2])) for r in rects] + return [] + + +def image_corner_points(im, settings): + grayscale = im.convert("L") + + # naive attempt at preventing focal points from collecting at watermarks near the bottom + gd = ImageDraw.Draw(grayscale) + gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999") + + np_im = np.array(grayscale) + + points = cv2.goodFeaturesToTrack( + np_im, + maxCorners=100, + qualityLevel=0.04, + minDistance=min(grayscale.width, grayscale.height)*0.07, + useHarrisDetector=False, + ) + + if points is None: + return [] + + focal_points = [] + for 
point in points: + x, y = point.ravel() + focal_points.append(PointOfInterest(x, y, size=4)) + + return focal_points + + +def image_entropy_points(im, settings): + landscape = im.height < im.width + portrait = im.height > im.width + if landscape: + move_idx = [0, 2] + move_max = im.size[0] + elif portrait: + move_idx = [1, 3] + move_max = im.size[1] + else: + return [] + + e_max = 0 + crop_current = [0, 0, settings.crop_width, settings.crop_height] + crop_best = crop_current + while crop_current[move_idx[1]] < move_max: + crop = im.crop(tuple(crop_current)) + e = image_entropy(crop) + + if (e > e_max): + e_max = e + crop_best = list(crop_current) + + crop_current[move_idx[0]] += 4 + crop_current[move_idx[1]] += 4 + + x_mid = int(crop_best[0] + settings.crop_width/2) + y_mid = int(crop_best[1] + settings.crop_height/2) + + return [PointOfInterest(x_mid, y_mid, size=25)] + + +def image_entropy(im): + # greyscale image entropy + # band = np.asarray(im.convert("L")) + band = np.asarray(im.convert("1"), dtype=np.uint8) + hist, _ = np.histogram(band, bins=range(0, 256)) + hist = hist[hist > 0] + return -np.log2(hist / hist.sum()).sum() + + +def poi_average(pois, settings): + weight = 0.0 + x = 0.0 + y = 0.0 + for poi in pois: + weight += poi.weight + x += poi.x * poi.weight + y += poi.y * poi.weight + avg_x = round(x / weight) + avg_y = round(y / weight) + + return PointOfInterest(avg_x, avg_y) + + +def is_landscape(w, h): + return w > h + + +def is_portrait(w, h): + return h > w + + +def is_square(w, h): + return w == h + + +class PointOfInterest: + def __init__(self, x, y, weight=1.0, size=10): + self.x = x + self.y = y + self.weight = weight + self.size = size + + def bounding(self, size): + return [ + self.x - size//2, + self.y - size//2, + self.x + size//2, + self.y + size//2 + ] + + +class Settings: + def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False): + self.crop_width = crop_width + self.crop_height = crop_height + self.corner_points_weight = corner_points_weight + self.entropy_points_weight = entropy_points_weight + self.face_points_weight = face_points_weight + self.annotate_image = annotate_image self.destop_view_image = False \ No newline at end of file -- cgit v1.2.1 From 0523704dade0508bf3ae0c8cb799b1ae332d449b Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sun, 23 Oct 2022 12:27:50 -0300 Subject: Update run_extras to use the temp filename In batch mode run_extras tries to preserve the original file name of the images. The problem is that this makes no sense, since the user only gets a list of images in the UI; trying to manually save them shows that these images have random temp names. Also, trying to keep "orig_name" in the API is a hassle that adds complexity to the consuming UI, since the client has to use (or emulate) an input (type=file) element in a form. Using the normal file name not only doesn't change the output and functionality in the original UI but also helps keep the API simple.
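As context for the one-line change below, here is a minimal sketch of why only the temp name is reliable here. It is not part of the patch and assumes Gradio's usual behavior of handing run_extras temporary-file wrappers in batch mode:

import os
import tempfile

# Hypothetical stand-in for an uploaded file object in batch mode: the
# object's .name attribute is a random temporary path such as
# /tmp/tmpab12cd34.png, which is also what the UI displays to the user.
with tempfile.NamedTemporaryFile(suffix=".png") as upload:
    print(os.path.splitext(upload.name)[0])  # prints the extension-less temp path

Since the temporary name is the only one guaranteed to exist on such objects, keying saved results on os.path.splitext(img.name)[0] matches what the user actually sees.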
--- modules/extras.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/extras.py b/modules/extras.py index 22c5a1c1..29ac312e 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -33,7 +33,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ for img in image_folder: image = Image.open(img) imageArr.append(image) - imageNameArr.append(os.path.splitext(img.orig_name)[0]) + imageNameArr.append(os.path.splitext(img.name)[0]) elif extras_mode == 2: assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled' -- cgit v1.2.1 From 4ff852ffb50859f2eae75375cab94dd790a46886 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sun, 23 Oct 2022 13:07:59 -0300 Subject: Add batch processing "extras" endpoint --- modules/api/api.py | 25 +++++++++++++++++++++++-- modules/api/models.py | 15 ++++++++++++++- 2 files changed, 37 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 3b804373..528134a8 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -10,6 +10,7 @@ import base64 from modules.api.models import * from PIL import Image from modules.extras import run_extras +from gradio import processing_utils def upscaler_to_index(name: str): try: @@ -44,6 +45,7 @@ class Api: self.queue_lock = queue_lock self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) + self.app.add_api_route("/sdapi/v1/extra-batch-image", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) def text2imgapi(self, txt2imgreq: StableDiffusionProcessingAPI ): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -78,12 +80,31 @@ class Api: reqDict.pop('upscaler_1') reqDict.pop('upscaler_2') - reqDict['image'] = base64_to_images([reqDict['image']])[0] + reqDict['image'] = processing_utils.decode_base64_to_file(reqDict['image']) with self.queue_lock: result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=0, image_folder="", input_dir="", output_dir="") - return ExtrasSingleImageResponse(image="data:image/png;base64,"+img_to_base64(result[0]), html_info_x=result[1], html_info=result[2]) + return ExtrasSingleImageResponse(image=processing_utils.encode_pil_to_base64(result[0]), html_info_x=result[1], html_info=result[2]) + + def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): + upscaler1Index = upscaler_to_index(req.upscaler_1) + upscaler2Index = upscaler_to_index(req.upscaler_2) + + reqDict = vars(req) + reqDict.pop('upscaler_1') + reqDict.pop('upscaler_2') + + reqDict['image_folder'] = list(map(processing_utils.decode_base64_to_file, reqDict['imageList'])) + reqDict.pop('imageList') + + with self.queue_lock: + result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=1, image="", input_dir="", output_dir="") + + return ExtrasBatchImagesResponse(images=list(map(processing_utils.encode_pil_to_base64, result[0])), html_info_x=result[1], html_info=result[2]) + + def extras_folder_processing_api(self): + raise NotImplementedError def pnginfoapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index dcf1ab54..bbd0ef53 100644 --- a/modules/api/models.py +++ 
b/modules/api/models.py @@ -29,4 +29,17 @@ class ExtrasSingleImageRequest(ExtrasBaseRequest): image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.") class ExtrasSingleImageResponse(ExtraBaseResponse): - image: str = Field(default=None, title="Image", description="The generated image in base64 format.") \ No newline at end of file + image: str = Field(default=None, title="Image", description="The generated image in base64 format.") + +class SerializableImage(BaseModel): + path: str = Field(title="Path", description="The image's path ()") + +class ImageItem(BaseModel): + data: str = Field(title="image data") + name: str = Field(title="filename") + +class ExtrasBatchImagesRequest(ExtrasBaseRequest): + imageList: list[str] = Field(title="Images", description="List of images to work on. Must be Base64 strings") + +class ExtrasBatchImagesResponse(ExtraBaseResponse): + images: list[str] = Field(title="Images", description="The generated images in base64 format.") \ No newline at end of file -- cgit v1.2.1 From e0ca4dfbc10e0af8dfc4185e5e758f33fd2f0d81 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sun, 23 Oct 2022 15:13:37 -0300 Subject: Update endpoints to use gradio's own utils functions --- modules/api/api.py | 75 +++++++++++++++++++++++++-------------------------- modules/api/models.py | 4 +-- 2 files changed, 38 insertions(+), 41 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 3f490ce2..3acb1f36 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -20,27 +20,27 @@ def upscaler_to_index(name: str): sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) -def img_to_base64(img: str): - buffer = io.BytesIO() - img.save(buffer, format="png") - return base64.b64encode(buffer.getvalue()) - -def base64_to_bytes(base64Img: str): - if "," in base64Img: - base64Img = base64Img.split(",")[1] - return io.BytesIO(base64.b64decode(base64Img)) - -def base64_to_images(base64Imgs: list[str]): - imgs = [] - for img in base64Imgs: - img = Image.open(base64_to_bytes(img)) - imgs.append(img) - return imgs +# def img_to_base64(img: str): +# buffer = io.BytesIO() +# img.save(buffer, format="png") +# return base64.b64encode(buffer.getvalue()) + +# def base64_to_bytes(base64Img: str): +# if "," in base64Img: +# base64Img = base64Img.split(",")[1] +# return io.BytesIO(base64.b64decode(base64Img)) + +# def base64_to_images(base64Imgs: list[str]): +# imgs = [] +# for img in base64Imgs: +# img = Image.open(base64_to_bytes(img)) +# imgs.append(img) +# return imgs class ImageToImageResponse(BaseModel): images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") - parameters: Json - info: Json + parameters: dict + info: str class Api: @@ -49,17 +49,17 @@ class Api: self.app = app self.queue_lock = queue_lock self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) - self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"]) + self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) self.app.add_api_route("/sdapi/v1/extra-batch-image", self.extras_batch_images_api, methods=["POST"], 
response_model=ExtrasBatchImagesResponse) - def __base64_to_image(self, base64_string): - # if has a comma, deal with prefix - if "," in base64_string: - base64_string = base64_string.split(",")[1] - imgdata = base64.b64decode(base64_string) - # convert base64 to PIL image - return Image.open(io.BytesIO(imgdata)) + # def __base64_to_image(self, base64_string): + # # if has a comma, deal with prefix + # if "," in base64_string: + # base64_string = base64_string.split(",")[1] + # imgdata = base64.b64decode(base64_string) + # # convert base64 to PIL image + # return Image.open(io.BytesIO(imgdata)) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -79,11 +79,9 @@ class Api: with self.queue_lock: processed = process_images(p) - b64images = list(map(img_to_base64, processed.images)) - - return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=json.dumps(processed.info)) - + b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) + return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=processed.info) def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI): sampler_index = sampler_to_index(img2imgreq.sampler_index) @@ -98,7 +96,7 @@ class Api: mask = img2imgreq.mask if mask: - mask = self.__base64_to_image(mask) + mask = processing_utils.decode_base64_to_image(mask) populate = img2imgreq.copy(update={ # Override __init__ params @@ -113,7 +111,7 @@ class Api: imgs = [] for img in init_images: - img = self.__base64_to_image(img) + img = processing_utils.decode_base64_to_image(img) imgs = [img] * p.batch_size p.init_images = imgs @@ -121,13 +119,12 @@ class Api: with self.queue_lock: processed = process_images(p) - b64images = [] - for i in processed.images: - buffer = io.BytesIO() - i.save(buffer, format="png") - b64images.append(base64.b64encode(buffer.getvalue())) - - return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=json.dumps(processed.info)) + b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) + # for i in processed.images: + # buffer = io.BytesIO() + # i.save(buffer, format="png") + # b64images.append(base64.b64encode(buffer.getvalue())) + return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.info) def extras_single_image_api(self, req: ExtrasSingleImageRequest): upscaler1Index = upscaler_to_index(req.upscaler_1) diff --git a/modules/api/models.py b/modules/api/models.py index bbd0ef53..209f8af5 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -4,8 +4,8 @@ from modules.shared import sd_upscalers class TextToImageResponse(BaseModel): images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") - parameters: Json - info: Json + parameters: str + info: str class ExtrasBaseRequest(BaseModel): resize_mode: Literal[0, 1] = Field(default=0, title="Resize Mode", description="Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w.") -- cgit v1.2.1 From 866b36d705a338d299aba385788729d60f7d48c8 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sun, 23 Oct 2022 15:35:49 -0300 Subject: Move processing's models into models.py It didn't make sense to have two different files for the same thing, and "models" is a more descriptive name.
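With the request and response models consolidated, the routes above are easy to exercise from any HTTP client. A hypothetical client-side sketch of calling the txt2img endpoint follows; it is not part of these patches, and the host, port, prompt, and output filenames are assumptions:

import base64
import io

import requests
from PIL import Image

# Fields of StableDiffusionProcessingTxt2Img may be passed in the payload;
# sampler_index falls back to "Euler" when omitted.
payload = {"prompt": "a red fox in the snow", "steps": 20}
resp = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
resp.raise_for_status()

for n, img_b64 in enumerate(resp.json()["images"]):
    # encode_pil_to_base64 may prepend a data-URI header; strip it if present.
    img_b64 = img_b64.split(",", 1)[-1]
    Image.open(io.BytesIO(base64.b64decode(img_b64))).save(f"txt2img-{n}.png")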
--- modules/api/api.py | 57 ++++------------------- modules/api/models.py | 112 +++++++++++++++++++++++++++++++++++++++++++++- modules/api/processing.py | 106 ------------------------------------------- 3 files changed, 119 insertions(+), 156 deletions(-) delete mode 100644 modules/api/processing.py (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 3acb1f36..20e85e82 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,16 +1,11 @@ -from modules.api.processing import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI -from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images -from modules.sd_samplers import all_samplers -import modules.shared as shared import uvicorn +from gradio import processing_utils from fastapi import APIRouter, HTTPException -import json -import io -import base64 +import modules.shared as shared from modules.api.models import * -from PIL import Image +from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images +from modules.sd_samplers import all_samplers from modules.extras import run_extras -from gradio import processing_utils def upscaler_to_index(name: str): try: @@ -20,29 +15,6 @@ def upscaler_to_index(name: str): sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) -# def img_to_base64(img: str): -# buffer = io.BytesIO() -# img.save(buffer, format="png") -# return base64.b64encode(buffer.getvalue()) - -# def base64_to_bytes(base64Img: str): -# if "," in base64Img: -# base64Img = base64Img.split(",")[1] -# return io.BytesIO(base64.b64decode(base64Img)) - -# def base64_to_images(base64Imgs: list[str]): -# imgs = [] -# for img in base64Imgs: -# img = Image.open(base64_to_bytes(img)) -# imgs.append(img) -# return imgs - -class ImageToImageResponse(BaseModel): - images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") - parameters: dict - info: str - - class Api: def __init__(self, app, queue_lock): self.router = APIRouter() @@ -51,15 +23,7 @@ class Api: self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) - self.app.add_api_route("/sdapi/v1/extra-batch-image", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) - - # def __base64_to_image(self, base64_string): - # # if has a comma, deal with prefix - # if "," in base64_string: - # base64_string = base64_string.split(",")[1] - # imgdata = base64.b64decode(base64_string) - # # convert base64 to PIL image - # return Image.open(io.BytesIO(imgdata)) + self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -81,7 +45,7 @@ class Api: b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) - return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=processed.info) + return 
TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.info) def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI): sampler_index = sampler_to_index(img2imgreq.sampler_index) @@ -120,10 +84,7 @@ class Api: processed = process_images(p) b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) - # for i in processed.images: - # buffer = io.BytesIO() - # i.save(buffer, format="png") - # b64images.append(base64.b64encode(buffer.getvalue())) + return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.info) def extras_single_image_api(self, req: ExtrasSingleImageRequest): @@ -134,12 +95,12 @@ class Api: reqDict.pop('upscaler_1') reqDict.pop('upscaler_2') - reqDict['image'] = processing_utils.decode_base64_to_file(reqDict['image']) + reqDict['image'] = processing_utils.decode_base64_to_image(reqDict['image']) with self.queue_lock: result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=0, image_folder="", input_dir="", output_dir="") - return ExtrasSingleImageResponse(image=processing_utils.encode_pil_to_base64(result[0]), html_info_x=result[1], html_info=result[2]) + return ExtrasSingleImageResponse(image=processing_utils.encode_pil_to_base64(result[0][0]), html_info_x=result[1], html_info=result[2]) def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): upscaler1Index = upscaler_to_index(req.upscaler_1) diff --git a/modules/api/models.py b/modules/api/models.py index 209f8af5..362e6277 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,10 +1,118 @@ -from pydantic import BaseModel, Field, Json +import inspect +from pydantic import BaseModel, Field, Json, create_model +from typing import Any, Optional from typing_extensions import Literal +from inflection import underscore +from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img from modules.shared import sd_upscalers +API_NOT_ALLOWED = [ + "self", + "kwargs", + "sd_model", + "outpath_samples", + "outpath_grids", + "sampler_index", + "do_not_save_samples", + "do_not_save_grid", + "extra_generation_params", + "overlay_images", + "do_not_reload_embeddings", + "seed_enable_extras", + "prompt_for_display", + "sampler_noise_scheduler_override", + "ddim_discretize" +] + +class ModelDef(BaseModel): + """Assistance Class for Pydantic Dynamic Model Generation""" + + field: str + field_alias: str + field_type: Any + field_value: Any + + +class PydanticModelGenerator: + """ + Takes in created classes and stubs them out in a way FastAPI/Pydantic is happy about: + source_data is a snapshot of the default values produced by the class + params are the names of the actual keys required by __init__ + """ + + def __init__( + self, + model_name: str = None, + class_instance = None, + additional_fields = None, + ): + def field_type_generator(k, v): + # field_type = str if not overrides.get(k) else overrides[k]["type"] + # print(k, v.annotation, v.default) + field_type = v.annotation + + return Optional[field_type] + + def merge_class_params(class_): + all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_))) + parameters = {} + for classes in all_classes: + parameters = {**parameters, **inspect.signature(classes.__init__).parameters} + return parameters + + + self._model_name = model_name + self._class_data = merge_class_params(class_instance) + self._model_def = [ + ModelDef( + field=underscore(k), + field_alias=k, + 
field_type=field_type_generator(k, v), + field_value=v.default + ) + for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED + ] + + for fields in additional_fields: + self._model_def.append(ModelDef( + field=underscore(fields["key"]), + field_alias=fields["key"], + field_type=fields["type"], + field_value=fields["default"])) + + def generate_model(self): + """ + Creates a pydantic BaseModel + from the json and overrides provided at initialization + """ + fields = { + d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias)) for d in self._model_def + } + DynamicModel = create_model(self._model_name, **fields) + DynamicModel.__config__.allow_population_by_field_name = True + DynamicModel.__config__.allow_mutation = True + return DynamicModel + +StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( + "StableDiffusionProcessingTxt2Img", + StableDiffusionProcessingTxt2Img, + [{"key": "sampler_index", "type": str, "default": "Euler"}] +).generate_model() + +StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( + "StableDiffusionProcessingImg2Img", + StableDiffusionProcessingImg2Img, + [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}] +).generate_model() + class TextToImageResponse(BaseModel): images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") - parameters: str + parameters: dict + info: str + +class ImageToImageResponse(BaseModel): + images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.") + parameters: dict info: str class ExtrasBaseRequest(BaseModel): diff --git a/modules/api/processing.py b/modules/api/processing.py deleted file mode 100644 index f551fa35..00000000 --- a/modules/api/processing.py +++ /dev/null @@ -1,106 +0,0 @@ -from array import array -from inflection import underscore -from typing import Any, Dict, Optional -from pydantic import BaseModel, Field, create_model -from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img -import inspect - - -API_NOT_ALLOWED = [ - "self", - "kwargs", - "sd_model", - "outpath_samples", - "outpath_grids", - "sampler_index", - "do_not_save_samples", - "do_not_save_grid", - "extra_generation_params", - "overlay_images", - "do_not_reload_embeddings", - "seed_enable_extras", - "prompt_for_display", - "sampler_noise_scheduler_override", - "ddim_discretize" -] - -class ModelDef(BaseModel): - """Assistance Class for Pydantic Dynamic Model Generation""" - - field: str - field_alias: str - field_type: Any - field_value: Any - - -class PydanticModelGenerator: - """ - Takes in created classes and stubs them out in a way FastAPI/Pydantic is happy about: - source_data is a snapshot of the default values produced by the class - params are the names of the actual keys required by __init__ - """ - - def __init__( - self, - model_name: str = None, - class_instance = None, - additional_fields = None, - ): - def field_type_generator(k, v): - # field_type = str if not overrides.get(k) else overrides[k]["type"] - # print(k, v.annotation, v.default) - field_type = v.annotation - - return Optional[field_type] - - def merge_class_params(class_): - all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_))) - parameters = {} - for classes in all_classes: - parameters = {**parameters, 
**inspect.signature(classes.__init__).parameters} - return parameters - - - self._model_name = model_name - self._class_data = merge_class_params(class_instance) - self._model_def = [ - ModelDef( - field=underscore(k), - field_alias=k, - field_type=field_type_generator(k, v), - field_value=v.default - ) - for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED - ] - - for fields in additional_fields: - self._model_def.append(ModelDef( - field=underscore(fields["key"]), - field_alias=fields["key"], - field_type=fields["type"], - field_value=fields["default"])) - - def generate_model(self): - """ - Creates a pydantic BaseModel - from the json and overrides provided at initialization - """ - fields = { - d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias)) for d in self._model_def - } - DynamicModel = create_model(self._model_name, **fields) - DynamicModel.__config__.allow_population_by_field_name = True - DynamicModel.__config__.allow_mutation = True - return DynamicModel - -StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( - "StableDiffusionProcessingTxt2Img", - StableDiffusionProcessingTxt2Img, - [{"key": "sampler_index", "type": str, "default": "Euler"}] -).generate_model() - -StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( - "StableDiffusionProcessingImg2Img", - StableDiffusionProcessingImg2Img, - [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}] -).generate_model() \ No newline at end of file -- cgit v1.2.1 From 1e625624ba6ab3dfc167f0a5226780bb9b50fb58 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sun, 23 Oct 2022 16:01:16 -0300 Subject: Add folder processing endpoint Also minor refactor --- modules/api/api.py | 56 +++++++++++++++++++++++++++------------------------ modules/api/models.py | 6 +++++- 2 files changed, 35 insertions(+), 27 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 20e85e82..7b4fbe29 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,5 +1,5 @@ import uvicorn -from gradio import processing_utils +from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image from fastapi import APIRouter, HTTPException import modules.shared as shared from modules.api.models import * @@ -11,10 +11,18 @@ def upscaler_to_index(name: str): try: return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) except: - raise HTTPException(status_code=400, detail="Upscaler not found") + raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}") sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) +def setUpscalers(req: dict): + reqDict = vars(req) + reqDict['extras_upscaler_1'] = upscaler_to_index(req.upscaler_1) + reqDict['extras_upscaler_2'] = upscaler_to_index(req.upscaler_2) + reqDict.pop('upscaler_1') + reqDict.pop('upscaler_2') + return reqDict + class Api: def __init__(self, app, queue_lock): self.router = APIRouter() @@ -24,6 +32,7 @@ class Api: self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"],
response_model=ExtrasSingleImageResponse) self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) + self.app.add_api_route("/sdapi/v1/extra-folder-images", self.extras_folder_processing_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -43,7 +52,7 @@ class Api: with self.queue_lock: processed = process_images(p) - b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) + b64images = list(map(encode_pil_to_base64, processed.images)) return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.info) @@ -60,7 +69,7 @@ class Api: mask = img2imgreq.mask if mask: - mask = processing_utils.decode_base64_to_image(mask) + mask = decode_base64_to_image(mask) populate = img2imgreq.copy(update={ # Override __init__ params @@ -75,7 +84,7 @@ class Api: imgs = [] for img in init_images: - img = processing_utils.decode_base64_to_image(img) + img = decode_base64_to_image(img) imgs = [img] * p.batch_size p.init_images = imgs @@ -83,43 +92,38 @@ class Api: with self.queue_lock: processed = process_images(p) - b64images = list(map(processing_utils.encode_pil_to_base64, processed.images)) + b64images = list(map(encode_pil_to_base64, processed.images)) return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.info) def extras_single_image_api(self, req: ExtrasSingleImageRequest): - upscaler1Index = upscaler_to_index(req.upscaler_1) - upscaler2Index = upscaler_to_index(req.upscaler_2) - - reqDict = vars(req) - reqDict.pop('upscaler_1') - reqDict.pop('upscaler_2') + reqDict = setUpscalers(req) - reqDict['image'] = processing_utils.decode_base64_to_image(reqDict['image']) + reqDict['image'] = decode_base64_to_image(reqDict['image']) with self.queue_lock: - result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=0, image_folder="", input_dir="", output_dir="") + result = run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", **reqDict) - return ExtrasSingleImageResponse(image=processing_utils.encode_pil_to_base64(result[0][0]), html_info_x=result[1], html_info=result[2]) + return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info_x=result[1], html_info=result[2]) def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): - upscaler1Index = upscaler_to_index(req.upscaler_1) - upscaler2Index = upscaler_to_index(req.upscaler_2) + reqDict = setUpscalers(req) - reqDict = vars(req) - reqDict.pop('upscaler_1') - reqDict.pop('upscaler_2') - - reqDict['image_folder'] = list(map(processing_utils.decode_base64_to_file, reqDict['imageList'])) + reqDict['image_folder'] = list(map(decode_base64_to_file, reqDict['imageList'])) reqDict.pop('imageList') with self.queue_lock: - result = run_extras(**reqDict, extras_upscaler_1=upscaler1Index, extras_upscaler_2=upscaler2Index, extras_mode=1, image="", input_dir="", output_dir="") + result = run_extras(extras_mode=1, image="", input_dir="", output_dir="", **reqDict) - return ExtrasBatchImagesResponse(images=list(map(processing_utils.encode_pil_to_base64, result[0])), html_info_x=result[1], html_info=result[2]) + return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info_x=result[1], html_info=result[2]) - def 
extras_folder_processing_api(self): - raise NotImplementedError + def extras_folder_processing_api(self, req:ExtrasFoldersRequest): + reqDict = setUpscalers(req) + + with self.queue_lock: + result = run_extras(extras_mode=2, image=None, image_folder=None, **reqDict) + + return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info_x=result[1], html_info=result[2]) def pnginfoapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index 362e6277..6f096807 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -150,4 +150,8 @@ class ExtrasBatchImagesRequest(ExtrasBaseRequest): imageList: list[str] = Field(title="Images", description="List of images to work on. Must be Base64 strings") class ExtrasBatchImagesResponse(ExtraBaseResponse): - images: list[str] = Field(title="Images", description="The generated images in base64 format.") \ No newline at end of file + images: list[str] = Field(title="Images", description="The generated images in base64 format.") + +class ExtrasFoldersRequest(ExtrasBaseRequest): + input_dir: str = Field(title="Input directory", description="Directory path from which to take the images") + output_dir: str = Field(title="Output directory", description="Directory path to put the processed images into") -- cgit v1.2.1 From 90f02c75220d187e075203a4e3b450bfba392c4d Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sun, 23 Oct 2022 16:03:30 -0300 Subject: Remove unused field and class --- modules/api/api.py | 6 +++--- modules/api/models.py | 6 +----- 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 7b4fbe29..799e3701 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -104,7 +104,7 @@ class Api: with self.queue_lock: result = run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", **reqDict) - return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info_x=result[1], html_info=result[2]) + return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1]) def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): reqDict = setUpscalers(req) @@ -115,7 +115,7 @@ class Api: result = run_extras(extras_mode=1, image="", input_dir="", output_dir="", **reqDict) - return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info_x=result[1], html_info=result[2]) + return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) def extras_folder_processing_api(self, req:ExtrasFoldersRequest): reqDict = setUpscalers(req) @@ -123,7 +123,7 @@ class Api: result = run_extras(extras_mode=2, image=None, image_folder=None, **reqDict) - return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info_x=result[1], html_info=result[2]) + return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) def pnginfoapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index 6f096807..e461d397 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -130,8 +130,7 @@ class ExtrasBaseRequest(BaseModel): extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of the secondary upscaler, values should be between 0 and 1.") class
ExtraBaseResponse(BaseModel): - html_info_x: str - html_info: str + html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.") class ExtrasSingleImageRequest(ExtrasBaseRequest): image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.") @@ -139,9 +138,6 @@ class ExtrasSingleImageRequest(ExtrasBaseRequest): class ExtrasSingleImageResponse(ExtraBaseResponse): image: str = Field(default=None, title="Image", description="The generated image in base64 format.") -class SerializableImage(BaseModel): - path: str = Field(title="Path", description="The image's path ()") - class ImageItem(BaseModel): data: str = Field(title="image data") name: str = Field(title="filename") -- cgit v1.2.1 From 124e44cf1eed1edc68954f63a2a9bc428aabbcec Mon Sep 17 00:00:00 2001 From: yfszzx Date: Mon, 24 Oct 2022 09:51:56 +0800 Subject: remove browser to extension --- modules/images_history.py | 424 -------------------------------------------- modules/inspiration.py | 193 -------------------- modules/script_callbacks.py | 2 - modules/shared.py | 15 -- modules/ui.py | 20 +-- 5 files changed, 4 insertions(+), 650 deletions(-) delete mode 100644 modules/images_history.py delete mode 100644 modules/inspiration.py (limited to 'modules') diff --git a/modules/images_history.py b/modules/images_history.py deleted file mode 100644 index bc5cf11f..00000000 --- a/modules/images_history.py +++ /dev/null @@ -1,424 +0,0 @@ -import os -import shutil -import time -import hashlib -import gradio -system_bak_path = "webui_log_and_bak" -custom_tab_name = "custom fold" -faverate_tab_name = "favorites" -tabs_list = ["txt2img", "img2img", "extras", faverate_tab_name] -def is_valid_date(date): - try: - time.strptime(date, "%Y%m%d") - return True - except: - return False - -def reduplicative_file_move(src, dst): - def same_name_file(basename, path): - name, ext = os.path.splitext(basename) - f_list = os.listdir(path) - max_num = 0 - for f in f_list: - if len(f) <= len(basename): - continue - f_ext = f[-len(ext):] if len(ext) > 0 else "" - if f[:len(name)] == name and f_ext == ext: - if f[len(name)] == "(" and f[-len(ext)-1] == ")": - number = f[len(name)+1:-len(ext)-1] - if number.isdigit(): - if int(number) > max_num: - max_num = int(number) - return f"{name}({max_num + 1}){ext}" - name = os.path.basename(src) - save_name = os.path.join(dst, name) - if not os.path.exists(save_name): - shutil.move(src, dst) - else: - name = same_name_file(name, dst) - shutil.move(src, os.path.join(dst, name)) - -def traverse_all_files(curr_path, image_list, all_type=False): - try: - f_list = os.listdir(curr_path) - except: - if all_type or (curr_path[-10:].rfind(".") > 0 and curr_path[-4:] != ".txt" and curr_path[-4:] != ".csv"): - image_list.append(curr_path) - return image_list - for file in f_list: - file = os.path.join(curr_path, file) - if (not all_type) and (file[-4:] == ".txt" or file[-4:] == ".csv"): - pass - elif os.path.isfile(file) and file[-10:].rfind(".") > 0: - image_list.append(file) - else: - image_list = traverse_all_files(file, image_list) - return image_list - -def auto_sorting(dir_name): - bak_path = os.path.join(dir_name, system_bak_path) - if not os.path.exists(bak_path): - os.mkdir(bak_path) - log_file = None - files_list = [] - f_list = os.listdir(dir_name) - for file in f_list: - if file == system_bak_path: - continue - file_path = os.path.join(dir_name, file) - if not is_valid_date(file): - if file[-10:].rfind(".") > 
0: - files_list.append(file_path) - else: - files_list = traverse_all_files(file_path, files_list, all_type=True) - - for file in files_list: - date_str = time.strftime("%Y%m%d",time.localtime(os.path.getmtime(file))) - file_path = os.path.dirname(file) - hash_path = hashlib.md5(file_path.encode()).hexdigest() - path = os.path.join(dir_name, date_str, hash_path) - if not os.path.exists(path): - os.makedirs(path) - if log_file is None: - log_file = open(os.path.join(bak_path,"path_mapping.csv"),"a") - log_file.write(f"{hash_path},{file_path}\n") - reduplicative_file_move(file, path) - - date_list = [] - f_list = os.listdir(dir_name) - for f in f_list: - if is_valid_date(f): - date_list.append(f) - elif f == system_bak_path: - continue - else: - try: - reduplicative_file_move(os.path.join(dir_name, f), bak_path) - except: - pass - - today = time.strftime("%Y%m%d",time.localtime(time.time())) - if today not in date_list: - date_list.append(today) - return sorted(date_list, reverse=True) - -def archive_images(dir_name, date_to): - filenames = [] - batch_size =int(opts.images_history_num_per_page * opts.images_history_pages_num) - if batch_size <= 0: - batch_size = opts.images_history_num_per_page * 6 - today = time.strftime("%Y%m%d",time.localtime(time.time())) - date_to = today if date_to is None or date_to == "" else date_to - date_to_bak = date_to - if False: #opts.images_history_reconstruct_directory: - date_list = auto_sorting(dir_name) - for date in date_list: - if date <= date_to: - path = os.path.join(dir_name, date) - if date == today and not os.path.exists(path): - continue - filenames = traverse_all_files(path, filenames) - if len(filenames) > batch_size: - break - filenames = sorted(filenames, key=lambda file: -os.path.getmtime(file)) - else: - filenames = traverse_all_files(dir_name, filenames) - total_num = len(filenames) - tmparray = [(os.path.getmtime(file), file) for file in filenames ] - date_stamp = time.mktime(time.strptime(date_to, "%Y%m%d")) + 86400 - filenames = [] - date_list = {date_to:None} - date = time.strftime("%Y%m%d",time.localtime(time.time())) - for t, f in tmparray: - date = time.strftime("%Y%m%d",time.localtime(t)) - date_list[date] = None - if t <= date_stamp: - filenames.append((t, f ,date)) - date_list = sorted(list(date_list.keys()), reverse=True) - sort_array = sorted(filenames, key=lambda x:-x[0]) - if len(sort_array) > batch_size: - date = sort_array[batch_size][2] - filenames = [x[1] for x in sort_array] - else: - date = date_to if len(sort_array) == 0 else sort_array[-1][2] - filenames = [x[1] for x in sort_array] - filenames = [x[1] for x in sort_array if x[2]>= date] - num = len(filenames) - last_date_from = date_to_bak if num == 0 else time.strftime("%Y%m%d", time.localtime(time.mktime(time.strptime(date, "%Y%m%d")) - 1000)) - date = date[:4] + "/" + date[4:6] + "/" + date[6:8] - date_to_bak = date_to_bak[:4] + "/" + date_to_bak[4:6] + "/" + date_to_bak[6:8] - load_info = "
" - load_info += f"{total_num} images in this directory. Loaded {num} images during {date} - {date_to_bak}, divided into {int((num + 1) // opts.images_history_num_per_page + 1)} pages" - load_info += "
" - _, image_list, _, _, visible_num = get_recent_images(1, 0, filenames) - return ( - date_to, - load_info, - filenames, - 1, - image_list, - "", - "", - visible_num, - last_date_from, - gradio.update(visible=total_num > num) - ) - -def delete_image(delete_num, name, filenames, image_index, visible_num): - if name == "": - return filenames, delete_num - else: - delete_num = int(delete_num) - visible_num = int(visible_num) - image_index = int(image_index) - index = list(filenames).index(name) - i = 0 - new_file_list = [] - for name in filenames: - if i >= index and i < index + delete_num: - if os.path.exists(name): - if visible_num == image_index: - new_file_list.append(name) - i += 1 - continue - print(f"Delete file {name}") - os.remove(name) - visible_num -= 1 - txt_file = os.path.splitext(name)[0] + ".txt" - if os.path.exists(txt_file): - os.remove(txt_file) - else: - print(f"Not exists file {name}") - else: - new_file_list.append(name) - i += 1 - return new_file_list, 1, visible_num - -def save_image(file_name): - if file_name is not None and os.path.exists(file_name): - shutil.copy(file_name, opts.outdir_save) - -def get_recent_images(page_index, step, filenames): - page_index = int(page_index) - num_of_imgs_per_page = int(opts.images_history_num_per_page) - max_page_index = len(filenames) // num_of_imgs_per_page + 1 - page_index = max_page_index if page_index == -1 else page_index + step - page_index = 1 if page_index < 1 else page_index - page_index = max_page_index if page_index > max_page_index else page_index - idx_frm = (page_index - 1) * num_of_imgs_per_page - image_list = filenames[idx_frm:idx_frm + num_of_imgs_per_page] - length = len(filenames) - visible_num = num_of_imgs_per_page if idx_frm + num_of_imgs_per_page <= length else length % num_of_imgs_per_page - visible_num = num_of_imgs_per_page if visible_num == 0 else visible_num - return page_index, image_list, "", "", visible_num - -def loac_batch_click(date_to): - if date_to is None: - return time.strftime("%Y%m%d",time.localtime(time.time())), [] - else: - return None, [] -def forward_click(last_date_from, date_to_recorder): - if len(date_to_recorder) == 0: - return None, [] - if last_date_from == date_to_recorder[-1]: - date_to_recorder = date_to_recorder[:-1] - if len(date_to_recorder) == 0: - return None, [] - return date_to_recorder[-1], date_to_recorder[:-1] - -def backward_click(last_date_from, date_to_recorder): - if last_date_from is None or last_date_from == "": - return time.strftime("%Y%m%d",time.localtime(time.time())), [] - if len(date_to_recorder) == 0 or last_date_from != date_to_recorder[-1]: - date_to_recorder.append(last_date_from) - return last_date_from, date_to_recorder - - -def first_page_click(page_index, filenames): - return get_recent_images(1, 0, filenames) - -def end_page_click(page_index, filenames): - return get_recent_images(-1, 0, filenames) - -def prev_page_click(page_index, filenames): - return get_recent_images(page_index, -1, filenames) - -def next_page_click(page_index, filenames): - return get_recent_images(page_index, 1, filenames) - -def page_index_change(page_index, filenames): - return get_recent_images(page_index, 0, filenames) - -def show_image_info(tabname_box, num, page_index, filenames): - file = filenames[int(num) + int((page_index - 1) * int(opts.images_history_num_per_page))] - tm = "
" + time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(os.path.getmtime(file))) + "
" - return file, tm, num, file - -def enable_page_buttons(): - return gradio.update(visible=True) - -def change_dir(img_dir, date_to): - warning = None - try: - if os.path.exists(img_dir): - try: - f = os.listdir(img_dir) - except: - warning = f"'{img_dir} is not a directory" - else: - warning = "The directory is not exist" - except: - warning = "The format of the directory is incorrect" - if warning is None: - today = time.strftime("%Y%m%d",time.localtime(time.time())) - return gradio.update(visible=False), gradio.update(visible=True), None, None if date_to != today else today, gradio.update(visible=True), gradio.update(visible=True) - else: - return gradio.update(visible=True), gradio.update(visible=False), warning, date_to, gradio.update(visible=False), gradio.update(visible=False) - -def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict): - custom_dir = False - if tabname == "txt2img": - dir_name = opts.outdir_txt2img_samples - elif tabname == "img2img": - dir_name = opts.outdir_img2img_samples - elif tabname == "extras": - dir_name = opts.outdir_extras_samples - elif tabname == faverate_tab_name: - dir_name = opts.outdir_save - else: - custom_dir = True - dir_name = None - - if not custom_dir: - d = dir_name.split("/") - dir_name = d[0] - for p in d[1:]: - dir_name = os.path.join(dir_name, p) - if not os.path.exists(dir_name): - os.makedirs(dir_name) - - with gr.Column() as page_panel: - with gr.Row(): - with gr.Column(scale=1, visible=not custom_dir) as load_batch_box: - load_batch = gr.Button('Load', elem_id=tabname + "_images_history_start", full_width=True) - with gr.Column(scale=4): - with gr.Row(): - img_path = gr.Textbox(dir_name, label="Images directory", placeholder="Input images directory", interactive=custom_dir) - with gr.Row(): - with gr.Column(visible=False, scale=1) as batch_panel: - with gr.Row(): - forward = gr.Button('Prev batch') - backward = gr.Button('Next batch') - with gr.Column(scale=3): - load_info = gr.HTML(visible=not custom_dir) - with gr.Row(visible=False) as warning: - warning_box = gr.Textbox("Message", interactive=False) - - with gr.Row(visible=not custom_dir, elem_id=tabname + "_images_history") as main_panel: - with gr.Column(scale=2): - with gr.Row(visible=True) as turn_page_buttons: - #date_to = gr.Dropdown(label="Date to") - first_page = gr.Button('First Page') - prev_page = gr.Button('Prev Page') - page_index = gr.Number(value=1, label="Page Index") - next_page = gr.Button('Next Page') - end_page = gr.Button('End Page') - - history_gallery = gr.Gallery(show_label=False, elem_id=tabname + "_images_history_gallery").style(grid=opts.images_history_grid_num) - with gr.Row(): - delete_num = gr.Number(value=1, interactive=True, label="number of images to delete consecutively next") - delete = gr.Button('Delete', elem_id=tabname + "_images_history_del_button") - - with gr.Column(): - with gr.Row(): - with gr.Column(): - img_file_info = gr.Textbox(label="Generate Info", interactive=False, lines=6) - gr.HTML("
") - img_file_name = gr.Textbox(value="", label="File Name", interactive=False) - img_file_time= gr.HTML() - with gr.Row(): - if tabname != faverate_tab_name: - save_btn = gr.Button('Collect') - pnginfo_send_to_txt2img = gr.Button('Send to txt2img') - pnginfo_send_to_img2img = gr.Button('Send to img2img') - - - # hiden items - with gr.Row(visible=False): - renew_page = gr.Button('Refresh page', elem_id=tabname + "_images_history_renew_page") - batch_date_to = gr.Textbox(label="Date to") - visible_img_num = gr.Number() - date_to_recorder = gr.State([]) - last_date_from = gr.Textbox() - tabname_box = gr.Textbox(tabname) - image_index = gr.Textbox(value=-1) - set_index = gr.Button('set_index', elem_id=tabname + "_images_history_set_index") - filenames = gr.State() - all_images_list = gr.State() - hidden = gr.Image(type="pil") - info1 = gr.Textbox() - info2 = gr.Textbox() - - img_path.submit(change_dir, inputs=[img_path, batch_date_to], outputs=[warning, main_panel, warning_box, batch_date_to, load_batch_box, load_info]) - - #change batch - change_date_output = [batch_date_to, load_info, filenames, page_index, history_gallery, img_file_name, img_file_time, visible_img_num, last_date_from, batch_panel] - - batch_date_to.change(archive_images, inputs=[img_path, batch_date_to], outputs=change_date_output) - batch_date_to.change(enable_page_buttons, inputs=None, outputs=[turn_page_buttons]) - batch_date_to.change(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") - - load_batch.click(loac_batch_click, inputs=[batch_date_to], outputs=[batch_date_to, date_to_recorder]) - forward.click(forward_click, inputs=[last_date_from, date_to_recorder], outputs=[batch_date_to, date_to_recorder]) - backward.click(backward_click, inputs=[last_date_from, date_to_recorder], outputs=[batch_date_to, date_to_recorder]) - - - #delete - delete.click(delete_image, inputs=[delete_num, img_file_name, filenames, image_index, visible_img_num], outputs=[filenames, delete_num, visible_img_num]) - delete.click(fn=None, _js="images_history_delete", inputs=[delete_num, tabname_box, image_index], outputs=None) - if tabname != faverate_tab_name: - save_btn.click(save_image, inputs=[img_file_name], outputs=None) - - #turn page - gallery_inputs = [page_index, filenames] - gallery_outputs = [page_index, history_gallery, img_file_name, img_file_time, visible_img_num] - first_page.click(first_page_click, inputs=gallery_inputs, outputs=gallery_outputs) - next_page.click(next_page_click, inputs=gallery_inputs, outputs=gallery_outputs) - prev_page.click(prev_page_click, inputs=gallery_inputs, outputs=gallery_outputs) - end_page.click(end_page_click, inputs=gallery_inputs, outputs=gallery_outputs) - page_index.submit(page_index_change, inputs=gallery_inputs, outputs=gallery_outputs) - renew_page.click(page_index_change, inputs=gallery_inputs, outputs=gallery_outputs) - - first_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") - next_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") - prev_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") - end_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") - page_index.submit(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") - renew_page.click(fn=None, inputs=[tabname_box], outputs=None, _js="images_history_turnpage") - - # other funcitons - set_index.click(show_image_info, 
_js="images_history_get_current_img", inputs=[tabname_box, image_index, page_index, filenames], outputs=[img_file_name, img_file_time, image_index, hidden]) - img_file_name.change(fn=None, _js="images_history_enable_del_buttons", inputs=None, outputs=None) - hidden.change(fn=run_pnginfo, inputs=[hidden], outputs=[info1, img_file_info, info2]) - switch_dict["fn"](pnginfo_send_to_txt2img, switch_dict["t2i"], img_file_info, 'switch_to_txt2img') - switch_dict["fn"](pnginfo_send_to_img2img, switch_dict["i2i"], img_file_info, 'switch_to_img2img_img2img') - - - -def create_history_tabs(gr, sys_opts, cmp_ops, run_pnginfo, switch_dict): - global opts; - opts = sys_opts - loads_files_num = int(opts.images_history_num_per_page) - num_of_imgs_per_page = int(opts.images_history_num_per_page * opts.images_history_pages_num) - if cmp_ops.browse_all_images: - tabs_list.append(custom_tab_name) - with gr.Blocks(analytics_enabled=False) as images_history: - with gr.Tabs() as tabs: - for tab in tabs_list: - with gr.Tab(tab): - with gr.Blocks(analytics_enabled=False) : - show_images_history(gr, opts, tab, run_pnginfo, switch_dict) - gradio.Checkbox(opts.images_history_preload, elem_id="images_history_preload", visible=False) - gradio.Textbox(",".join(tabs_list), elem_id="images_history_tabnames_list", visible=False) - - return images_history diff --git a/modules/inspiration.py b/modules/inspiration.py deleted file mode 100644 index 29cf8297..00000000 --- a/modules/inspiration.py +++ /dev/null @@ -1,193 +0,0 @@ -import os -import random -import gradio -from modules.shared import opts -inspiration_system_path = os.path.join(opts.inspiration_dir, "system") -def read_name_list(file, types=None, keyword=None): - if not os.path.exists(file): - return [] - ret = [] - f = open(file, "r") - line = f.readline() - while len(line) > 0: - line = line.rstrip("\n") - if types is not None: - dirname = os.path.split(line) - if dirname[0] in types and keyword in dirname[1].lower(): - ret.append(line) - else: - ret.append(line) - line = f.readline() - return ret - -def save_name_list(file, name): - name_list = read_name_list(file) - if name not in name_list: - with open(file, "a") as f: - f.write(name + "\n") - -def get_types_list(): - files = os.listdir(opts.inspiration_dir) - types = [] - for x in files: - path = os.path.join(opts.inspiration_dir, x) - if x[0] == ".": - continue - if not os.path.isdir(path): - continue - if path == inspiration_system_path: - continue - types.append(x) - return types - -def get_inspiration_images(source, types, keyword): - keyword = keyword.strip(" ").lower() - get_num = int(opts.inspiration_rows_num * opts.inspiration_cols_num) - if source == "Favorites": - names = read_name_list(os.path.join(inspiration_system_path, "faverites.txt"), types, keyword) - names = random.sample(names, get_num) if len(names) > get_num else names - elif source == "Abandoned": - names = read_name_list(os.path.join(inspiration_system_path, "abandoned.txt"), types, keyword) - names = random.sample(names, get_num) if len(names) > get_num else names - elif source == "Exclude abandoned": - abandoned = read_name_list(os.path.join(inspiration_system_path, "abandoned.txt"), types, keyword) - all_names = [] - for tp in types: - name_list = os.listdir(os.path.join(opts.inspiration_dir, tp)) - all_names += [os.path.join(tp, x) for x in name_list if keyword in x.lower()] - - if len(all_names) > get_num: - names = [] - while len(names) < get_num: - name = random.choice(all_names) - if name not in abandoned: - names.append(name) - 
else: - names = all_names - else: - all_names = [] - for tp in types: - name_list = os.listdir(os.path.join(opts.inspiration_dir, tp)) - all_names += [os.path.join(tp, x) for x in name_list if keyword in x.lower()] - names = random.sample(all_names, get_num) if len(all_names) > get_num else all_names - image_list = [] - for a in names: - image_path = os.path.join(opts.inspiration_dir, a) - images = os.listdir(image_path) - if len(images) > 0: - image_list.append((os.path.join(image_path, random.choice(images)), a)) - else: - print(image_path) - return image_list, names - -def select_click(index, name_list): - name = name_list[int(index)] - path = os.path.join(opts.inspiration_dir, name) - images = os.listdir(path) - return name, [os.path.join(path, x) for x in images], "" - -def give_up_click(name): - file = os.path.join(inspiration_system_path, "abandoned.txt") - save_name_list(file, name) - return "Added to abandoned list" - -def collect_click(name): - file = os.path.join(inspiration_system_path, "faverites.txt") - save_name_list(file, name) - return "Added to faverite list" - -def moveout_click(name, source): - if source == "Abandoned": - file = os.path.join(inspiration_system_path, "abandoned.txt") - elif source == "Favorites": - file = os.path.join(inspiration_system_path, "faverites.txt") - else: - return None - name_list = read_name_list(file) - os.remove(file) - with open(file, "a") as f: - for a in name_list: - if a != name: - f.write(a + "\n") - return f"Moved out {name} from {source} list" - -def source_change(source): - if source in ["Abandoned", "Favorites"]: - return gradio.update(visible=True), [] - else: - return gradio.update(visible=False), [] -def add_to_prompt(name, prompt): - name = os.path.basename(name) - return prompt + "," + name - -def clear_keyword(): - return "" - -def ui(gr, opts, txt2img_prompt, img2img_prompt): - with gr.Blocks(analytics_enabled=False) as inspiration: - flag = os.path.exists(opts.inspiration_dir) - if flag: - types = get_types_list() - flag = len(types) > 0 - else: - os.makedirs(opts.inspiration_dir) - if not flag: - gr.HTML(""" -

- To activate inspiration function, you need get "inspiration" images first.
- You can create these images by run "Create inspiration images" script in txt2img page,
- you can get the artists or art styles list from here
- https://github.com/pharmapsychotic/clip-interrogator/tree/main/data
- download these files, and select these files in the "Create inspiration images" script UI
- There about 6000 artists and art styles in these files.
- This takes server hours depending on your GPU type and how many pictures you generate for each artist/style
- I suggest at least four images for each
- You can also download generated pictures from here:
- https://huggingface.co/datasets/yfszzx/inspiration
- unzip the file to the project directory of webui
- and restart webui, and enjoy the joy of creation!
- """) - return inspiration - if not os.path.exists(inspiration_system_path): - os.mkdir(inspiration_system_path) - with gr.Row(): - with gr.Column(scale=2): - inspiration_gallery = gr.Gallery(show_label=False, elem_id="inspiration_gallery").style(grid=opts.inspiration_cols_num, height='auto') - with gr.Column(scale=1): - types = gr.CheckboxGroup(choices=types, value=types) - with gr.Row(): - source = gr.Dropdown(choices=["All", "Favorites", "Exclude abandoned", "Abandoned"], value="Exclude abandoned", label="Source") - keyword = gr.Textbox("", label="Key word") - get_inspiration = gr.Button("Get inspiration", elem_id="inspiration_get_button") - name = gr.Textbox(show_label=False, interactive=False) - with gr.Row(): - send_to_txt2img = gr.Button('to txt2img') - send_to_img2img = gr.Button('to img2img') - collect = gr.Button('Collect') - give_up = gr.Button("Don't show again") - moveout = gr.Button("Move out", visible=False) - warning = gr.HTML() - style_gallery = gr.Gallery(show_label=False).style(grid=2, height='auto') - - - - with gr.Row(visible=False): - select_button = gr.Button('set button', elem_id="inspiration_select_button") - name_list = gr.State() - - get_inspiration.click(get_inspiration_images, inputs=[source, types, keyword], outputs=[inspiration_gallery, name_list]) - keyword.submit(fn=None, _js="inspiration_click_get_button", inputs=None, outputs=None) - source.change(source_change, inputs=[source], outputs=[moveout, style_gallery]) - source.change(fn=clear_keyword, _js="inspiration_click_get_button", inputs=None, outputs=[keyword]) - types.change(fn=clear_keyword, _js="inspiration_click_get_button", inputs=None, outputs=[keyword]) - - select_button.click(select_click, _js="inspiration_selected", inputs=[name, name_list], outputs=[name, style_gallery, warning]) - give_up.click(give_up_click, inputs=[name], outputs=[warning]) - collect.click(collect_click, inputs=[name], outputs=[warning]) - moveout.click(moveout_click, inputs=[name, source], outputs=[warning]) - moveout.click(fn=None, _js="inspiration_click_get_button", inputs=None, outputs=None) - - send_to_txt2img.click(add_to_prompt, inputs=[name, txt2img_prompt], outputs=[txt2img_prompt]) - send_to_img2img.click(add_to_prompt, inputs=[name, img2img_prompt], outputs=[img2img_prompt]) - send_to_txt2img.click(collect_click, inputs=[name], outputs=[warning]) - send_to_img2img.click(collect_click, inputs=[name], outputs=[warning]) - send_to_txt2img.click(None, _js='switch_to_txt2img', inputs=None, outputs=None) - send_to_img2img.click(None, _js="switch_to_img2img_img2img", inputs=None, outputs=None) - return inspiration diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 5bcccd67..66666a56 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -1,4 +1,3 @@ - callbacks_model_loaded = [] callbacks_ui_tabs = [] callbacks_ui_settings = [] @@ -16,7 +15,6 @@ def model_loaded_callback(sd_model): def ui_tabs_callback(): res = [] - for callback in callbacks_ui_tabs: res += callback() or [] diff --git a/modules/shared.py b/modules/shared.py index 0aaaadac..5dfd7927 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -321,21 +321,6 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), })) -options_templates.update(options_section(('inspiration', "Inspiration"), { - "inspiration_dir": OptionInfo("inspiration", "Directory of inspiration", component_args=hide_dirs), 
- "inspiration_max_samples": OptionInfo(4, "Maximum number of samples, used to determine which folders to skip when continue running the create script", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}), - "inspiration_rows_num": OptionInfo(4, "Rows of inspiration interface frame", gr.Slider, {"minimum": 4, "maximum": 16, "step": 1}), - "inspiration_cols_num": OptionInfo(8, "Columns of inspiration interface frame", gr.Slider, {"minimum": 4, "maximum": 16, "step": 1}), -})) - -options_templates.update(options_section(('images-history', "Images Browser"), { - #"images_history_reconstruct_directory": OptionInfo(False, "Reconstruct output directory structure.This can greatly improve the speed of loading , but will change the original output directory structure"), - "images_history_preload": OptionInfo(False, "Preload images at startup"), - "images_history_num_per_page": OptionInfo(36, "Number of pictures displayed on each page"), - "images_history_pages_num": OptionInfo(6, "Minimum number of pages per load "), - "images_history_grid_num": OptionInfo(6, "Number of grids in each row"), - -})) class Options: data = None diff --git a/modules/ui.py b/modules/ui.py index a73175f5..fa42712e 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -49,14 +49,12 @@ from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img import modules.textual_inversion.ui import modules.hypernetworks.ui -import modules.images_history as images_history -import modules.inspiration as inspiration - - # this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI mimetypes.init() mimetypes.add_type('application/javascript', '.js') +txt2img_paste_fields = [] +img2img_paste_fields = [] if not cmd_opts.share and not cmd_opts.listen: @@ -1193,16 +1191,7 @@ def create_ui(wrap_gradio_gpu_call): inputs=[image], outputs=[html, generation_info, html2], ) - #images history - images_history_switch_dict = { - "fn": modules.generation_parameters_copypaste.connect_paste, - "t2i": txt2img_paste_fields, - "i2i": img2img_paste_fields - } - - browser_interface = images_history.create_history_tabs(gr, opts, cmd_opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict) - inspiration_interface = inspiration.ui(gr, opts, txt2img_prompt, img2img_prompt) - + with gr.Blocks() as modelmerger_interface: with gr.Row().style(equal_height=False): with gr.Column(variant='panel'): @@ -1651,8 +1640,6 @@ Requested path was: {f} (img2img_interface, "img2img", "img2img"), (extras_interface, "Extras", "extras"), (pnginfo_interface, "PNG Info", "pnginfo"), - (inspiration_interface, "Inspiration", "inspiration"), - (browser_interface , "Image Browser", "images_history"), (modelmerger_interface, "Checkpoint Merger", "modelmerger"), (train_interface, "Train", "ti"), ] @@ -1896,6 +1883,7 @@ def load_javascript(raw_response): javascript = f'' scripts_list = modules.scripts.list_scripts("javascript", ".js") + scripts_list += modules.scripts.list_scripts("scripts", ".js") for basedir, filename, path in scripts_list: with open(path, "r", encoding="utf8") as jsfile: javascript += f"\n" -- cgit v1.2.1 From cef1b89aa2e6c7647db7e93a4cd4ec020da3f2da Mon Sep 17 00:00:00 2001 From: yfszzx Date: Mon, 24 Oct 2022 10:10:33 +0800 Subject: remove browser to extension --- modules/script_callbacks.py | 2 ++ modules/shared.py | 1 - modules/ui.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'modules') diff --git 
a/modules/script_callbacks.py b/modules/script_callbacks.py index 66666a56..f46d3d9a 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -1,3 +1,4 @@ + callbacks_model_loaded = [] callbacks_ui_tabs = [] callbacks_ui_settings = [] @@ -15,6 +16,7 @@ def model_loaded_callback(sd_model): def ui_tabs_callback(): res = [] + for callback in callbacks_ui_tabs: res += callback() or [] diff --git a/modules/shared.py b/modules/shared.py index 5dfd7927..6541e679 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -82,7 +82,6 @@ parser.add_argument("--api", action='store_true', help="use api=True to launch t parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the api instead of the webui") parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI") parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None) -parser.add_argument("--browse-all-images", action='store_true', help="Allow browsing all images by Image Browser", default=False) cmd_opts = parser.parse_args() restricted_opts = [ diff --git a/modules/ui.py b/modules/ui.py index fa42712e..a32f7259 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1104,7 +1104,7 @@ def create_ui(wrap_gradio_gpu_call): upscaling_crop = gr.Checkbox(label='Crop to fit', value=True) with gr.Group(): - extras_upscaler_1 = gr.Radio(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers] , value=shared.sd_upscalers[0].name, type="index") + extras_upscaler_1 = gr.Radio(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") with gr.Group(): extras_upscaler_2 = gr.Radio(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") -- cgit v1.2.1 From a889c93f23f1e80d0dac4e5ddbc3a26207e8cdf1 Mon Sep 17 00:00:00 2001 From: yfszzx Date: Mon, 24 Oct 2022 11:13:16 +0800 Subject: paste_fields add to public --- modules/ui.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index a32f7259..a73b9ff0 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -784,6 +784,7 @@ def create_ui(wrap_gradio_gpu_call): ] ) + global txt2img_paste_fields txt2img_paste_fields = [ (txt2img_prompt, "Prompt"), (txt2img_negative_prompt, "Negative prompt"), @@ -1054,6 +1055,7 @@ def create_ui(wrap_gradio_gpu_call): outputs=[prompt, negative_prompt, style1, style2], ) + global img2img_paste_fields img2img_paste_fields = [ (img2img_prompt, "Prompt"), (img2img_negative_prompt, "Negative prompt"), -- cgit v1.2.1 From 974196932583b96b6b76632052fc0d7e70820bf3 Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Sun, 23 Oct 2022 22:38:42 +0300 Subject: Save properly processed image before color correction --- modules/processing.py | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index ff83023c..15b639e1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -46,6 +46,20 @@ def apply_color_correction(correction, image): return image +def apply_overlay(overlay_exists, overlay, paste_loc, image): + if overlay_exists: + if paste_loc is not None: + x, y, w, h = paste_loc 
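+                # paste_loc is the (x, y, w, h) region the processed crop was taken
+                # from: the image is resized back to that region and pasted onto a
+                # blank canvas the size of the overlay, and the overlay is then
+                # alpha-composited on top before converting back to RGB.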
+ base_image = Image.new('RGBA', (overlay.width, overlay.height)) + image = images.resize_image(1, image, w, h) + base_image.paste(image, (x, y)) + image = base_image + + image = image.convert('RGBA') + image.alpha_composite(overlay) + image = image.convert('RGB') + + return image def get_correct_sampler(p): if isinstance(p, modules.processing.StableDiffusionProcessingTxt2Img): @@ -446,25 +460,14 @@ def process_images(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() image = Image.fromarray(x_sample) - + if p.color_corrections is not None and i < len(p.color_corrections): if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction: - images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction") + image_without_cc = apply_overlay(p.overlay_images is not None and i < len(p.overlay_images), p.overlay_images[i], p.paste_to, image) + images.save_image(image_without_cc, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction") image = apply_color_correction(p.color_corrections[i], image) - if p.overlay_images is not None and i < len(p.overlay_images): - overlay = p.overlay_images[i] - - if p.paste_to is not None: - x, y, w, h = p.paste_to - base_image = Image.new('RGBA', (overlay.width, overlay.height)) - image = images.resize_image(1, image, w, h) - base_image.paste(image, (x, y)) - image = base_image - - image = image.convert('RGBA') - image.alpha_composite(overlay) - image = image.convert('RGB') + image = apply_overlay(p.overlay_images is not None and i < len(p.overlay_images), p.overlay_images[i], p.paste_to, image) if opts.samples_save and not p.do_not_save_samples: images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p) -- cgit v1.2.1 From f2cc3f32d5bc8538e95edec54d7dc1b9efdf769a Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Sun, 23 Oct 2022 22:44:46 +0300 Subject: fix whitespaces --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 15b639e1..2a332514 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -460,7 +460,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() image = Image.fromarray(x_sample) - + if p.color_corrections is not None and i < len(p.color_corrections): if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction: image_without_cc = apply_overlay(p.overlay_images is not None and i < len(p.overlay_images), p.overlay_images[i], p.paste_to, image) -- cgit v1.2.1 From b297cc3324979ec78d69b2d11dd18030dfad7bcc Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Sun, 23 Oct 2022 20:06:42 +0900 Subject: Hypernetworks - fix KeyError in statistics caching Statistics logging has changed to {filename : list[losses]}, so it has to use loss_info[key].pop() --- modules/hypernetworks/hypernetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 98a7b62e..33827210 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -274,8 +274,8 @@ def log_statistics(loss_info:dict, key, value): 
loss_info[key] = [value] else: loss_info[key].append(value) - if len(loss_info) > 1024: - loss_info.pop(0) + if len(loss_info[key]) > 1024: + loss_info[key].pop(0) def statistics(data): -- cgit v1.2.1 From 40b56c9289bf9458ae5ef3c1990ccea851c6c3e2 Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Sun, 23 Oct 2022 21:07:07 +0900 Subject: cleanup some code --- modules/hypernetworks/hypernetwork.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 33827210..4072bf54 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -16,6 +16,7 @@ from modules.textual_inversion import textual_inversion from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum +from collections import defaultdict, deque from statistics import stdev, mean class HypernetworkModule(torch.nn.Module): @@ -269,15 +270,6 @@ def stack_conds(conds): return torch.stack(conds) -def log_statistics(loss_info:dict, key, value): - if key not in loss_info: - loss_info[key] = [value] - else: - loss_info[key].append(value) - if len(loss_info[key]) > 1024: - loss_info[key].pop(0) - - def statistics(data): total_information = f"loss:{mean(data):.3f}"+u"\u00B1"+f"({stdev(data)/ (len(data)**0.5):.3f})" recent_data = data[-32:] @@ -341,7 +333,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log weight.requires_grad = True size = len(ds.indexes) - loss_dict = {} + loss_dict = defaultdict(lambda : deque(maxlen = 1024)) losses = torch.zeros((size,)) previous_mean_loss = 0 print("Mean loss of {} elements".format(size)) @@ -383,7 +375,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log losses[hypernetwork.step % losses.shape[0]] = loss.item() for entry in entries: - log_statistics(loss_dict, entry.filename, loss.item()) + loss_dict[entry.filename].append(loss.item()) optimizer.zero_grad() weights[0].grad = None -- cgit v1.2.1 From 348f89c8d40397c1875cff4a7331018785f9c3b8 Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Sun, 23 Oct 2022 21:29:53 +0900 Subject: statistics for pbar --- modules/hypernetworks/hypernetwork.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 4072bf54..48b56029 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -335,6 +335,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log size = len(ds.indexes) loss_dict = defaultdict(lambda : deque(maxlen = 1024)) losses = torch.zeros((size,)) + previous_mean_losses = [0] previous_mean_loss = 0 print("Mean loss of {} elements".format(size)) @@ -356,7 +357,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log for i, entries in pbar: hypernetwork.step = i + ititial_step if len(loss_dict) > 0: - previous_mean_loss = sum(i[-1] for i in loss_dict.values()) / len(loss_dict) + previous_mean_losses = [i[-1] for i in loss_dict.values()] + previous_mean_loss = mean(previous_mean_losses) scheduler.apply(optimizer, hypernetwork.step) if scheduler.finished: @@ -391,7 +393,13 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log if 
torch.isnan(losses[hypernetwork.step % losses.shape[0]]): raise RuntimeError("Loss diverged.") - pbar.set_description(f"dataset loss: {previous_mean_loss:.7f}") + + if len(previous_mean_losses) > 1: + std = stdev(previous_mean_losses) + else: + std = 0 + dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})" + pbar.set_description(dataset_loss_info) if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0: # Before saving, change name to match current checkpoint. -- cgit v1.2.1 From 0d2e1dac407a0e2f5b148d314715f0457b2525b7 Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Sun, 23 Oct 2022 21:41:39 +0900 Subject: convert deque -> list I don't feel this being efficient --- modules/hypernetworks/hypernetwork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 48b56029..fb510fa7 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -282,7 +282,7 @@ def report_statistics(loss_info:dict): for key in keys: try: print("Loss statistics for file " + key) - info, recent = statistics(loss_info[key]) + info, recent = statistics(list(loss_info[key])) print(info) print(recent) except Exception as e: -- cgit v1.2.1 From e9a410b5357612f63528015c5533c2185dcff92e Mon Sep 17 00:00:00 2001 From: AngelBottomless <35677394+aria1th@users.noreply.github.com> Date: Sun, 23 Oct 2022 21:47:39 +0900 Subject: check length for variance --- modules/hypernetworks/hypernetwork.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index fb510fa7..d647ea55 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -271,9 +271,17 @@ def stack_conds(conds): def statistics(data): - total_information = f"loss:{mean(data):.3f}"+u"\u00B1"+f"({stdev(data)/ (len(data)**0.5):.3f})" + if len(data) < 2: + std = 0 + else: + std = stdev(data) + total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})" recent_data = data[-32:] - recent_information = f"recent 32 loss:{mean(recent_data):.3f}"+u"\u00B1"+f"({stdev(recent_data)/ (len(recent_data)**0.5):.3f})" + if len(recent_data) < 2: + std = 0 + else: + std = stdev(recent_data) + recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + u"\u00B1" + f"({std / (len(recent_data) ** 0.5):.3f})" return total_information, recent_information -- cgit v1.2.1 From 6cbb04f7a5e675cf1f6dfc247aa9c9e8df7dc5ce Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 24 Oct 2022 09:15:26 +0300 Subject: fix #3517 breaking txt2img --- modules/processing.py | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 2a332514..c61bbfbd 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -46,18 +46,23 @@ def apply_color_correction(correction, image): return image -def apply_overlay(overlay_exists, overlay, paste_loc, image): - if overlay_exists: - if paste_loc is not None: - x, y, w, h = paste_loc - base_image = Image.new('RGBA', (overlay.width, overlay.height)) - image = images.resize_image(1, image, w, h) - 
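Taken together, the hypernetwork patches above converge on a capped per-file loss log with mean-and-variance reporting. A minimal sketch of that pattern, with the logic taken from the diffs and the helper names hypothetical:

from collections import defaultdict, deque
from statistics import mean, stdev

# each file keeps at most 1024 recent loss values; deque(maxlen=...) evicts
# the oldest entry automatically, replacing the manual pop(0) bookkeeping
loss_dict = defaultdict(lambda: deque(maxlen=1024))

def record_loss(filename, value):
    loss_dict[filename].append(value)

def statistics(data):
    data = list(data)  # deque -> list so len/stdev behave as in the patch
    std = stdev(data) if len(data) > 1 else 0  # stdev needs at least two samples
    return f"loss:{mean(data):.3f}" + "\u00B1" + f"({std / (len(data) ** 0.5):.3f})"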
base_image.paste(image, (x, y)) - image = base_image - - image = image.convert('RGBA') - image.alpha_composite(overlay) - image = image.convert('RGB') + +def apply_overlay(image, paste_loc, index, overlays): + if overlays is None or index >= len(overlays): + return image + + overlay = overlays[index] + + if paste_loc is not None: + x, y, w, h = paste_loc + base_image = Image.new('RGBA', (overlay.width, overlay.height)) + image = images.resize_image(1, image, w, h) + base_image.paste(image, (x, y)) + image = base_image + + image = image.convert('RGBA') + image.alpha_composite(overlay) + image = image.convert('RGB') return image @@ -463,11 +468,11 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if p.color_corrections is not None and i < len(p.color_corrections): if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction: - image_without_cc = apply_overlay(p.overlay_images is not None and i < len(p.overlay_images), p.overlay_images[i], p.paste_to, image) + image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images) images.save_image(image_without_cc, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction") image = apply_color_correction(p.color_corrections[i], image) - image = apply_overlay(p.overlay_images is not None and i < len(p.overlay_images), p.overlay_images[i], p.paste_to, image) + image = apply_overlay(image, p.paste_to, i, p.overlay_images) if opts.samples_save and not p.do_not_save_samples: images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p) -- cgit v1.2.1 From 734986dde3231416813f827242c111da212b2ccb Mon Sep 17 00:00:00 2001 From: Trung Ngo Date: Mon, 24 Oct 2022 01:17:09 -0500 Subject: add callback after image is saved --- modules/images.py | 3 ++- modules/script_callbacks.py | 12 +++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index b9589563..01c60f89 100644 --- a/modules/images.py +++ b/modules/images.py @@ -12,7 +12,7 @@ from PIL import Image, ImageFont, ImageDraw, PngImagePlugin from fonts.ttf import Roboto import string -from modules import sd_samplers, shared +from modules import sd_samplers, shared, script_callbacks from modules.shared import opts, cmd_opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) @@ -467,6 +467,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i else: txt_fullfn = None + script_callbacks.image_saved_callback(image, p, fullfn, txt_fullfn) return fullfn, txt_fullfn diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 5bcccd67..5836e4b9 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -2,11 +2,12 @@ callbacks_model_loaded = [] callbacks_ui_tabs = [] callbacks_ui_settings = [] - +callbacks_image_saved = [] def clear_callbacks(): callbacks_model_loaded.clear() callbacks_ui_tabs.clear() + callbacks_image_saved.clear() def model_loaded_callback(sd_model): @@ -28,6 +29,10 @@ def ui_settings_callback(): callback() +def image_saved_callback(image, p, fullfn, txt_fullfn): + for callback in callbacks_image_saved: + callback(image, p, fullfn, txt_fullfn) + def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the model is passed as an argument""" @@ -51,3 +56,8 @@ def on_ui_settings(callback): 
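For the image-saved hook being added to script_callbacks.py in this patch, a short usage sketch; the callback signature follows image_saved_callback above, and registration goes through on_save_imaged, as the hook is named in this commit:

from modules import script_callbacks

def log_saved(image, p, fullfn, txt_fullfn):
    # fullfn is the path of the saved image; txt_fullfn is the sidecar
    # parameters text file, or None when no text file was written
    print(f"saved {fullfn} ({image.width}x{image.height})")

script_callbacks.on_save_imaged(log_saved)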
"""register a function to be called before UI settings are populated; add your settings by using shared.opts.add_option(shared.OptionInfo(...)) """ callbacks_ui_settings.append(callback) + + +def on_save_imaged(callback): + """register a function to call after modules.images.save_image is called returning same values, original image and p """ + callbacks_image_saved.append(callback) -- cgit v1.2.1 From 876a96f0f9843382ebc8984db3de5d8af0e9ce4c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 24 Oct 2022 09:39:46 +0300 Subject: remove erroneous dir in the extension directory remove loading .js files from scripts dir (they go into javascript) load scripts after models, for scripts that depend on loaded models --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index a73b9ff0..03528968 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1885,7 +1885,7 @@ def load_javascript(raw_response): javascript = f'' scripts_list = modules.scripts.list_scripts("javascript", ".js") - scripts_list += modules.scripts.list_scripts("scripts", ".js") + for basedir, filename, path in scripts_list: with open(path, "r", encoding="utf8") as jsfile: javascript += f"\n" -- cgit v1.2.1 From 3be6b29d81408d2adb741bff5b11c80214aa621e Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 24 Oct 2022 15:14:34 +0900 Subject: indent=4 config.json --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 6541e679..d6ddfe59 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -348,7 +348,7 @@ class Options: def save(self, filename): with open(filename, "w", encoding="utf8") as file: - json.dump(self.data, file) + json.dump(self.data, file, indent=4) def same_type(self, x, y): if x is None or y is None: -- cgit v1.2.1 From c5d90628a4058bf49c2fdabf620a24db73407f31 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 22 Oct 2022 17:16:55 +0900 Subject: move "file_decoration" initialize section into "if forced_filename is None:" no need to initialize it if it's not going to be used --- modules/images.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index b9589563..50a59cff 100644 --- a/modules/images.py +++ b/modules/images.py @@ -386,18 +386,6 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i txt_fullfn (`str` or None): If a text file is saved for this image, this will be its full path. Otherwise None. 
''' - if short_filename or prompt is None or seed is None: - file_decoration = "" - elif opts.save_to_dirs: - file_decoration = opts.samples_filename_pattern or "[seed]" - else: - file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]" - - if file_decoration != "": - file_decoration = "-" + file_decoration.lower() - - file_decoration = apply_filename_pattern(file_decoration, p, seed, prompt) + suffix - if extension == 'png' and opts.enable_pnginfo and info is not None: pnginfo = PngImagePlugin.PngInfo() @@ -419,6 +407,18 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i os.makedirs(path, exist_ok=True) if forced_filename is None: + if short_filename or prompt is None or seed is None: + file_decoration = "" + elif opts.save_to_dirs: + file_decoration = opts.samples_filename_pattern or "[seed]" + else: + file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]" + + if file_decoration != "": + file_decoration = "-" + file_decoration.lower() + + file_decoration = apply_filename_pattern(file_decoration, p, seed, prompt) + suffix + basecount = get_next_sequence_number(path, basename) fullfn = "a.png" fullfn_without_extension = "a" -- cgit v1.2.1 From 7d4a4db9ea7543c079f4a4a702c2945f4b66cd11 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 22 Oct 2022 17:48:59 +0900 Subject: modify unnecessary sting assignment as it's going to get overwritten --- modules/images.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index 50a59cff..cc5066b1 100644 --- a/modules/images.py +++ b/modules/images.py @@ -420,8 +420,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i file_decoration = apply_filename_pattern(file_decoration, p, seed, prompt) + suffix basecount = get_next_sequence_number(path, basename) - fullfn = "a.png" - fullfn_without_extension = "a" + fullfn = None + fullfn_without_extension = None for i in range(500): fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}" fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}") -- cgit v1.2.1 From 37dd6deafb831a809eaf7ae8d232937a8c7998e7 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 22 Oct 2022 21:11:15 +0900 Subject: filename pattern [datetime], extended customizable Format and Time Zone format: [datetime] [datetime] [datetime
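The log is cut off midway through describing the new [datetime] filename token and its customizable format and time zone. A minimal sketch of a strftime-based expansion consistent with the commit subject; the helper and its default format are assumptions, since the exact token grammar is truncated above:

import datetime

def replace_datetime(text, format_str="%Y%m%d%H%M%S"):
    # hypothetical helper: expand a plain "[datetime]" token via strftime;
    # the format_str default is an assumption, not taken from the patch
    return text.replace("[datetime]", datetime.datetime.now().strftime(format_str))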