From 68999d0b15d612965e7bc7feb62d6b4d55e112fa Mon Sep 17 00:00:00 2001
From: space-nuko <24979496+space-nuko@users.noreply.github.com>
Date: Sat, 25 Mar 2023 12:52:14 -0400
Subject: Add upscale slider to img2img
---
modules/generation_parameters_copypaste.py | 3 ++
modules/img2img.py | 3 +-
modules/processing.py | 18 +++++++-
modules/ui.py | 67 +++++++++++++++++++++++++++++-
4 files changed, 87 insertions(+), 4 deletions(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 6df76858..459de080 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -282,6 +282,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
res["Hires resize-1"] = 0
res["Hires resize-2"] = 0
+ if "Img2Img Upscale" not in res:
+ res["Img2Img Upscale"] = 1
+
restore_old_hires_fix_params(res)
return res
diff --git a/modules/img2img.py b/modules/img2img.py
index c973b770..d05fa750 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
processed_image.save(os.path.join(output_dir, filename))
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
override_settings = create_override_settings_dict(override_settings_texts)
is_batch = mode == 5
@@ -149,6 +149,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
inpaint_full_res_padding=inpaint_full_res_padding,
inpainting_mask_invert=inpainting_mask_invert,
override_settings=override_settings,
+ scale=scale,
)
p.scripts = modules.scripts.scripts_txt2img
diff --git a/modules/processing.py b/modules/processing.py
index 2e5a363f..fc4b166c 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -929,7 +929,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
sampler = None
- def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
+ def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, **kwargs):
super().__init__(**kwargs)
self.init_images = init_images
@@ -949,11 +949,27 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.mask = None
self.nmask = None
self.image_conditioning = None
+ self.scale = scale
+
+ def get_final_size(self):
+ if self.scale > 1:
+ img = self.init_images[0]
+ width = int(img.width * self.scale)
+ height = int(img.height * self.scale)
+ return width, height
+ else:
+ return self.width, self.height
+
def init(self, all_prompts, all_seeds, all_subseeds):
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
crop_region = None
+ if self.scale > 1:
+ self.extra_generation_params["Img2Img Upscale"] = self.scale
+
+ self.width, self.height = self.get_final_size()
+
image_mask = self.image_mask
if image_mask is not None:
diff --git a/modules/ui.py b/modules/ui.py
index af8546c2..bb548f92 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -15,6 +15,7 @@ import warnings
import gradio as gr
import gradio.routes
import gradio.utils
+from gradio.events import Releaseable
import numpy as np
from PIL import Image, PngImagePlugin
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
@@ -138,6 +139,26 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz
return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}"
+def calc_resolution_img2img(mode, scale, resize_x, resize_y, resize_mode, *i2i_images):
+ init_img = None
+ if mode in {0, 1, 3, 4}:
+ init_img = i2i_images[mode]
+ elif mode == 2:
+ init_img = i2i_images[mode]["image"]
+
+ if not init_img:
+ return ""
+
+ if scale > 1:
+ width = int(init_img.width * scale)
+ height = int(init_img.height * scale)
+ else:
+ width = resize_x
+ height = resize_y
+
+ return f"resize: from {init_img.width}x{init_img.height} to {width}x{height}"
+
+
def apply_styles(prompt, prompt_neg, styles):
prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)
prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles)
@@ -755,8 +776,13 @@ def create_ui():
elif category == "dimensions":
with FormRow():
with gr.Column(elem_id="img2img_column_size", scale=4):
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
+ with FormRow(variant="compact"):
+ final_resolution = FormHTML(value="", elem_id="img2img_finalres", label="Upscaled resolution", interactive=False)
+ with FormRow(variant="compact"):
+ scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=1.0, elem_id="img2img_scale")
+ with FormRow(variant="compact"):
+ width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
+ height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")
@@ -824,6 +850,41 @@ def create_ui():
outputs=[inpaint_controls, mask_alpha],
)
+ img2img_resolution_preview_inputs = [dummy_component, # filled in by selected img2img tab index in _js
+ scale, width, height, resize_mode,
+ init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint]
+ for input in img2img_resolution_preview_inputs:
+ if isinstance(input, Releaseable):
+ input.release(
+ fn=calc_resolution_img2img,
+ _js="get_img2img_tab_index_for_res_preview",
+ inputs=img2img_resolution_preview_inputs,
+ outputs=[final_resolution],
+ show_progress=False,
+ )
+ input.release(
+ None,
+ _js="onCalcResolutionImg2Img",
+ inputs=img2img_resolution_preview_inputs,
+ outputs=[],
+ show_progress=False,
+ )
+ else:
+ input.change(
+ fn=calc_resolution_img2img,
+ _js="get_img2img_tab_index_for_res_preview",
+ inputs=img2img_resolution_preview_inputs,
+ outputs=[final_resolution],
+ show_progress=False,
+ )
+ input.change(
+ None,
+ _js="onCalcResolutionImg2Img",
+ inputs=img2img_resolution_preview_inputs,
+ outputs=[],
+ show_progress=False,
+ )
+
img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
@@ -872,6 +933,7 @@ def create_ui():
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
height,
width,
+ scale,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
@@ -957,6 +1019,7 @@ def create_ui():
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
+ (scale, "Img2Img Upscale"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
--
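The sizing rule this patch introduces is easiest to see in isolation: whenever the new "Upscale by" slider is above 1, it overrides the manual Width/Height sliders and the final size is derived from the init image's dimensions instead. A minimal standalone sketch of that rule (Pillow only; it mirrors StableDiffusionProcessingImg2Img.get_final_size but is not the webui class itself):

    from PIL import Image

    def get_final_size(init_image, scale, width, height):
        # A scale factor above 1 wins over the manually chosen width/height
        if scale > 1:
            return int(init_image.width * scale), int(init_image.height * scale)
        return width, height

    img = Image.new("RGB", (512, 768))
    assert get_final_size(img, 2.0, 512, 512) == (1024, 1536)  # sliders ignored
    assert get_final_size(img, 1.0, 640, 640) == (640, 640)    # manual size used
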
From 7ea5d395c44be208f654b07ec7993aa2952f2510 Mon Sep 17 00:00:00 2001
From: space-nuko <24979496+space-nuko@users.noreply.github.com>
Date: Sun, 19 Feb 2023 03:45:43 -0800
Subject: Add upscaler to img2img
---
modules/generation_parameters_copypaste.py | 4 ++--
modules/img2img.py | 3 ++-
modules/processing.py | 23 +++++++++++++++++------
modules/ui.py | 10 +++++++---
4 files changed, 28 insertions(+), 12 deletions(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 459de080..0ad2ad4f 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -282,8 +282,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
res["Hires resize-1"] = 0
res["Hires resize-2"] = 0
- if "Img2Img Upscale" not in res:
- res["Img2Img Upscale"] = 1
+ if "Img2Img upscale" not in res:
+ res["Img2Img upscale"] = 1
restore_old_hires_fix_params(res)
diff --git a/modules/img2img.py b/modules/img2img.py
index d05fa750..959dd96e 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
processed_image.save(os.path.join(output_dir, filename))
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, upscaler: str, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
override_settings = create_override_settings_dict(override_settings_texts)
is_batch = mode == 5
@@ -150,6 +150,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
inpainting_mask_invert=inpainting_mask_invert,
override_settings=override_settings,
scale=scale,
+ upscaler=upscaler,
)
p.scripts = modules.scripts.scripts_txt2img
diff --git a/modules/processing.py b/modules/processing.py
index fc4b166c..afb8cfd1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -929,7 +929,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
sampler = None
- def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, **kwargs):
+ def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, upscaler: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
self.init_images = init_images
@@ -950,6 +950,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.nmask = None
self.image_conditioning = None
self.scale = scale
+ self.upscaler = upscaler
def get_final_size(self):
if self.scale > 1:
@@ -966,7 +967,16 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
crop_region = None
if self.scale > 1:
- self.extra_generation_params["Img2Img Upscale"] = self.scale
+ self.extra_generation_params["Img2Img upscale"] = self.scale
+
+ # Non-latent upscalers are run before sampling
+ # Latent upscalers are run during sampling
+ init_upscaler = None
+ if self.upscaler is not None:
+ self.extra_generation_params["Img2Img upscaler"] = self.upscaler
+ if self.upscaler not in shared.latent_upscale_modes:
+ assert len([x for x in shared.sd_upscalers if x.name == self.upscaler]) > 0, f"could not find upscaler named {self.upscaler}"
+ init_upscaler = self.upscaler
self.width, self.height = self.get_final_size()
@@ -992,7 +1002,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image_mask = images.resize_image(2, mask, self.width, self.height)
self.paste_to = (x1, y1, x2-x1, y2-y1)
else:
- image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
+ image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height, init_upscaler)
np_mask = np.array(image_mask)
np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
self.mask_for_overlay = Image.fromarray(np_mask)
@@ -1009,7 +1019,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image = images.flatten(img, opts.img2img_background_color)
if crop_region is None and self.resize_mode != 3:
- image = images.resize_image(self.resize_mode, image, self.width, self.height)
+ image = images.resize_image(self.resize_mode, image, self.width, self.height, init_upscaler)
if image_mask is not None:
image_masked = Image.new('RGBa', (image.width, image.height))
@@ -1054,8 +1064,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
- if self.resize_mode == 3:
- self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+ latent_scale_mode = shared.latent_upscale_modes.get(self.upscaler, None) if self.upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
+ if latent_scale_mode is not None:
+ self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])
if image_mask is not None:
init_mask = latent_mask
diff --git a/modules/ui.py b/modules/ui.py
index bb548f92..24ab0af7 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -767,7 +767,7 @@ def create_ui():
)
with FormRow():
- resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
+ resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
for category in ordered_ui_categories():
if category == "sampler":
@@ -797,7 +797,9 @@ def create_ui():
with FormRow():
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit")
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
+ with FormRow():
+ upscaler = gr.Dropdown(label="Upscaler", elem_id="img2img_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
+ denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
elif category == "seed":
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')
@@ -934,6 +936,7 @@ def create_ui():
height,
width,
scale,
+ upscaler,
resize_mode,
inpaint_full_res,
inpaint_full_res_padding,
@@ -1019,7 +1022,8 @@ def create_ui():
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
- (scale, "Img2Img Upscale"),
+ (scale, "Img2Img upscale"),
+ (upscaler, "Img2Img upscaler"),
(batch_size, "Batch size"),
(subseed, "Variation seed"),
(subseed_strength, "Variation seed strength"),
--
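The core of this patch is the split between latent and non-latent upscalers: non-latent upscalers run on the PIL image before it is encoded (the extra argument threaded into images.resize_image), while latent upscalers run during sampling as a torch.nn.functional.interpolate over init_latent. A hedged sketch of the dispatch, with shared.latent_upscale_modes and shared.sd_upscalers stubbed with illustrative values:

    latent_upscale_modes = {
        "Latent": {"mode": "bilinear", "antialias": False},
        "Latent (nearest)": {"mode": "nearest", "antialias": False},
    }
    sd_upscalers = ["None", "Lanczos", "ESRGAN_4x"]  # stand-in upscaler names

    def pick_upscale_path(upscaler):
        # Latent upscalers are applied to the encoded latent during sampling;
        # anything else is applied to the image before encoding.
        if upscaler in latent_upscale_modes:
            return "latent", latent_upscale_modes[upscaler]
        assert upscaler in sd_upscalers, f"could not find upscaler named {upscaler}"
        return "image", upscaler

    print(pick_upscale_path("Latent"))   # ('latent', {'mode': 'bilinear', ...})
    print(pick_upscale_path("Lanczos"))  # ('image', 'Lanczos')
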
From c5f9f7c23759f9a74fa2b563451569c8926604ba Mon Sep 17 00:00:00 2001
From: space-nuko <24979496+space-nuko@users.noreply.github.com>
Date: Sat, 25 Mar 2023 14:26:36 -0400
Subject: Use .success() callback on img2img preview inputs change
---
modules/ui.py | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 24ab0af7..40dd76f2 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -855,7 +855,7 @@ def create_ui():
img2img_resolution_preview_inputs = [dummy_component, # filled in by selected img2img tab index in _js
scale, width, height, resize_mode,
init_img, sketch, init_img_with_mask, inpaint_color_sketch, init_img_inpaint]
- for input in img2img_resolution_preview_inputs:
+ for input in img2img_resolution_preview_inputs[1:]:
if isinstance(input, Releaseable):
input.release(
fn=calc_resolution_img2img,
@@ -863,8 +863,7 @@ def create_ui():
inputs=img2img_resolution_preview_inputs,
outputs=[final_resolution],
show_progress=False,
- )
- input.release(
+ ).success(
None,
_js="onCalcResolutionImg2Img",
inputs=img2img_resolution_preview_inputs,
@@ -878,8 +877,7 @@ def create_ui():
inputs=img2img_resolution_preview_inputs,
outputs=[final_resolution],
show_progress=False,
- )
- input.change(
+ ).success(
None,
_js="onCalcResolutionImg2Img",
inputs=img2img_resolution_preview_inputs,
--
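This refactor leans on Gradio's event chaining: registering an event handler returns a dependency object, and .success() attaches a follow-up that only fires when the first callback finished without raising. A minimal sketch, assuming a Gradio 3.x release that exposes .success() and the _js keyword:

    import gradio as gr

    with gr.Blocks() as demo:
        scale = gr.Slider(1.0, 4.0, value=1.0, step=0.05, label="Upscale by")
        preview = gr.HTML()

        scale.release(
            fn=lambda s: f"resize factor: {s}x",
            inputs=[scale],
            outputs=[preview],
            show_progress=False,
        ).success(  # runs only if the callback above did not raise
            None,
            _js="() => console.log('resolution preview updated')",
            inputs=[],
            outputs=[],
        )

    # demo.launch()
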
From c9647c8d23efa8c939c6af39878784e246082122 Mon Sep 17 00:00:00 2001
From: space-nuko <24979496+space-nuko@users.noreply.github.com>
Date: Sat, 25 Mar 2023 16:11:41 -0400
Subject: Support Gradio's theme API
---
modules/shared.py | 35 +++++++++++++++++++++++++++++++++++
modules/ui.py | 2 +-
2 files changed, 36 insertions(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 11be3985..2f7892cd 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -4,6 +4,7 @@ import json
import os
import sys
import time
+import requests
from PIL import Image
import gradio as gr
@@ -54,6 +55,21 @@ ui_reorder_categories = [
"scripts",
]
+# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json
+gradio_hf_hub_themes = [
+ "gradio/glass",
+ "gradio/monochrome",
+ "gradio/seafoam",
+ "gradio/soft",
+ "freddyaboulton/dracula_revamped",
+ "gradio/dracula_test",
+ "abidlabs/dracula_test",
+ "abidlabs/pakistan",
+ "dawood/microsoft_windows",
+ "ysharma/steampunk"
+]
+
+
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
@@ -387,6 +403,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"),
"ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"),
"localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
+ "gradio_theme": OptionInfo("Default", "Gradio theme (requires restart)", gr.Dropdown, lambda: {"choices": ["Default"] + gradio_hf_hub_themes})
}))
options_templates.update(options_section(('ui', "Live previews"), {
@@ -599,6 +616,24 @@ clip_model = None
progress_print_out = sys.stdout
+gradio_theme = gr.themes.Base()
+
+
+def reload_gradio_theme(theme_name=None):
+ global gradio_theme
+ if not theme_name:
+ theme_name = opts.gradio_theme
+
+ if theme_name == "Default":
+ gradio_theme = gr.themes.Default()
+ else:
+ try:
+ gradio_theme = gr.themes.ThemeClass.from_hub(theme_name)
+ except requests.exceptions.ConnectionError:
+ print("Can't access HuggingFace Hub, falling back to default Gradio theme")
+ gradio_theme = gr.themes.Default()
+
+
class TotalTQDM:
def __init__(self):
diff --git a/modules/ui.py b/modules/ui.py
index af8546c2..6e049881 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1592,7 +1592,7 @@ def create_ui():
for _interface, label, _ifid in interfaces:
shared.tab_names.append(label)
- with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
+ with gr.Blocks(css=css, theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Row(elem_id="quicksettings", variant="compact"):
for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
component = create_setting_component(k, is_quicksettings=True)
--
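The interesting part of reload_gradio_theme is the fallback: a name such as "gradio/soft" is resolved against the HuggingFace Hub, and a connection failure degrades to the stock theme instead of aborting startup. A self-contained sketch under the same assumptions (a Gradio release with the gr.themes API, requests for the exception type):

    import requests
    import gradio as gr

    def load_theme(theme_name):
        if theme_name == "Default":
            return gr.themes.Default()
        try:
            # e.g. "gradio/soft" names a theme space on the HuggingFace Hub
            return gr.themes.ThemeClass.from_hub(theme_name)
        except requests.exceptions.ConnectionError:
            print("Can't access HuggingFace Hub, falling back to default Gradio theme")
            return gr.themes.Default()

    demo = gr.Blocks(theme=load_theme("gradio/soft"))
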
From 8a34671fe91e142bce9e5556cca2258b3be9dd6e Mon Sep 17 00:00:00 2001
From: MrCheeze
Date: Fri, 24 Mar 2023 22:48:16 -0400
Subject: Add support for the Variations models (unclip-h and unclip-l)
---
modules/lowvram.py | 10 ++++++----
modules/processing.py | 41 +++++++++++++++++++++++++++------------
modules/sd_models.py | 5 +++++
modules/sd_models_config.py | 7 +++++++
modules/sd_samplers_compvis.py | 31 ++++++++++++++++++++++-------
modules/sd_samplers_kdiffusion.py | 19 ++++++++++++------
6 files changed, 84 insertions(+), 29 deletions(-)
diff --git a/modules/lowvram.py b/modules/lowvram.py
index 042a0254..e254cc13 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -55,12 +55,12 @@ def setup_for_low_vram(sd_model, use_medvram):
if hasattr(sd_model.cond_stage_model, 'model'):
sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model
- # remove four big modules, cond, first_stage, depth (if applicable), and unet from the model and then
+ # remove several big modules: cond, first_stage, depth/embedder (if applicable), and unet from the model and then
# send the model to GPU. Then put modules back. the modules will be in CPU.
- stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), sd_model.model
- sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = None, None, None, None
+ stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), getattr(sd_model, 'embedder', None), sd_model.model
+ sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = None, None, None, None, None
sd_model.to(devices.device)
- sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = stored
+ sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = stored
# register hooks for the first three models
sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
@@ -69,6 +69,8 @@ def setup_for_low_vram(sd_model, use_medvram):
sd_model.first_stage_model.decode = first_stage_model_decode_wrap
if sd_model.depth_model:
sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
+ if sd_model.embedder:
+ sd_model.embedder.register_forward_pre_hook(send_me_to_gpu)
parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
if hasattr(sd_model.cond_stage_model, 'model'):
diff --git a/modules/processing.py b/modules/processing.py
index 59717b4c..1451811c 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -78,21 +78,27 @@ def apply_overlay(image, paste_loc, index, overlays):
def txt2img_image_conditioning(sd_model, x, width, height):
- if sd_model.model.conditioning_key not in {'hybrid', 'concat'}:
- # Dummy zero conditioning if we're not using inpainting model.
- # Still takes up a bit of memory, but no encoder call.
- # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
- return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
+ if sd_model.model.conditioning_key in {'hybrid', 'concat'}: # Inpainting models
- # The "masked-image" in this case will just be all zeros since the entire image is masked.
- image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
- image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))
+ # The "masked-image" in this case will just be all zeros since the entire image is masked.
+ image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
+ image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))
- # Add the fake full 1s mask to the first dimension.
- image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
- image_conditioning = image_conditioning.to(x.dtype)
+ # Add the fake full 1s mask to the first dimension.
+ image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+ image_conditioning = image_conditioning.to(x.dtype)
- return image_conditioning
+ return image_conditioning
+
+ elif sd_model.model.conditioning_key == "crossattn-adm": # UnCLIP models
+
+ return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)
+
+ else:
+ # Dummy zero conditioning if we're not using inpainting or unclip models.
+ # Still takes up a bit of memory, but no encoder call.
+ # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
+ return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
class StableDiffusionProcessing:
@@ -190,6 +196,14 @@ class StableDiffusionProcessing:
return conditioning_image
+ def unclip_image_conditioning(self, source_image):
+ c_adm = self.sd_model.embedder(source_image)
+ if self.sd_model.noise_augmentor is not None:
+ noise_level = 0 # TODO: Allow other noise levels?
+ c_adm, noise_level_emb = self.sd_model.noise_augmentor(c_adm, noise_level=repeat(torch.tensor([noise_level]).to(c_adm.device), '1 -> b', b=c_adm.shape[0]))
+ c_adm = torch.cat((c_adm, noise_level_emb), 1)
+ return c_adm
+
def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
self.is_using_inpainting_conditioning = True
@@ -241,6 +255,9 @@ class StableDiffusionProcessing:
if self.sampler.conditioning_key in {'hybrid', 'concat'}:
return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+ if self.sampler.conditioning_key == "crossattn-adm":
+ return self.unclip_image_conditioning(source_image)
+
# Dummy zero conditioning if we're not using inpainting or depth model.
return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index f0cb1240..c1a80d82 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -383,6 +383,11 @@ def repair_config(sd_config):
elif shared.cmd_opts.upcast_sampling:
sd_config.model.params.unet_config.params.use_fp16 = True
+ # For UnCLIP-L, override the hardcoded karlo directory
+ if hasattr(sd_config.model.params, "noise_aug_config") and hasattr(sd_config.model.params.noise_aug_config.params, "clip_stats_path"):
+ karlo_path = os.path.join(paths.models_path, 'karlo')
+ sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path)
+
sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'
sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight'
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py
index 91c21700..9398f528 100644
--- a/modules/sd_models_config.py
+++ b/modules/sd_models_config.py
@@ -14,6 +14,8 @@ config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
+config_unclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-l-inference.yaml")
+config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inference.yaml")
config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
@@ -65,9 +67,14 @@ def is_using_v_parameterization_for_sd2(state_dict):
def guess_model_config_from_state_dict(sd, filename):
sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
+ sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None)
if sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
return config_depth_model
+ elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 768:
+ return config_unclip
+ elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 1024:
+ return config_unopenclip
if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
if diffusion_model_input.shape[1] == 9:
diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py
index 083da18c..bfcc5574 100644
--- a/modules/sd_samplers_compvis.py
+++ b/modules/sd_samplers_compvis.py
@@ -70,8 +70,13 @@ class VanillaStableDiffusionSampler:
# Have to unwrap the inpainting conditioning here to perform pre-processing
image_conditioning = None
+ uc_image_conditioning = None
if isinstance(cond, dict):
- image_conditioning = cond["c_concat"][0]
+ if self.conditioning_key == "crossattn-adm":
+ image_conditioning = cond["c_adm"]
+ uc_image_conditioning = unconditional_conditioning["c_adm"]
+ else:
+ image_conditioning = cond["c_concat"][0]
cond = cond["c_crossattn"][0]
unconditional_conditioning = unconditional_conditioning["c_crossattn"][0]
@@ -98,8 +103,12 @@ class VanillaStableDiffusionSampler:
# Wrap the image conditioning back up since the DDIM code can accept the dict directly.
# Note that they need to be lists because it just concatenates them later.
if image_conditioning is not None:
- cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
- unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
+ if self.conditioning_key == "crossattn-adm":
+ cond = {"c_adm": image_conditioning, "c_crossattn": [cond]}
+ unconditional_conditioning = {"c_adm": uc_image_conditioning, "c_crossattn": [unconditional_conditioning]}
+ else:
+ cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
+ unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
return x, ts, cond, unconditional_conditioning
@@ -176,8 +185,12 @@ class VanillaStableDiffusionSampler:
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
- conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
- unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
+ if self.conditioning_key == "crossattn-adm":
+ conditioning = {"c_adm": image_conditioning, "c_crossattn": [conditioning]}
+ unconditional_conditioning = {"c_adm": torch.zeros_like(image_conditioning), "c_crossattn": [unconditional_conditioning]}
+ else:
+ conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
+ unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
@@ -195,8 +208,12 @@ class VanillaStableDiffusionSampler:
# Wrap the conditioning models with additional image conditioning for inpainting model
# dummy_for_plms is needed because PLMS code checks the first item in the dict to have the right shape
if image_conditioning is not None:
- conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]}
- unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]}
+ if self.conditioning_key == "crossattn-adm":
+ conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_adm": image_conditioning}
+ unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_adm": torch.zeros_like(image_conditioning)}
+ else:
+ conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]}
+ unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]}
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 93f0e55a..e9f08518 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -92,14 +92,21 @@ class CFGDenoiser(torch.nn.Module):
batch_size = len(conds_list)
repeats = [len(conds_list[i]) for i in range(batch_size)]
+ if shared.sd_model.model.conditioning_key == "crossattn-adm":
+ image_uncond = torch.zeros_like(image_cond)
+ make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm}
+ else:
+ image_uncond = image_cond
+ make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]}
+
if not is_edit_model:
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
- image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
+ image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond])
else:
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma])
- image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond] + [torch.zeros_like(self.init_latent)])
+ image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)])
denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond)
cfg_denoiser_callback(denoiser_params)
@@ -116,13 +123,13 @@ class CFGDenoiser(torch.nn.Module):
cond_in = torch.cat([tensor, uncond, uncond])
if shared.batch_cond_uncond:
- x_out = self.inner_model(x_in, sigma_in, cond={"c_crossattn": [cond_in], "c_concat": [image_cond_in]})
+ x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict([cond_in], image_cond_in))
else:
x_out = torch.zeros_like(x_in)
for batch_offset in range(0, x_out.shape[0], batch_size):
a = batch_offset
b = a + batch_size
- x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [cond_in[a:b]], "c_concat": [image_cond_in[a:b]]})
+ x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict([cond_in[a:b]], image_cond_in[a:b]))
else:
x_out = torch.zeros_like(x_in)
batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
@@ -135,9 +142,9 @@ class CFGDenoiser(torch.nn.Module):
else:
c_crossattn = torch.cat([tensor[a:b]], uncond)
- x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": c_crossattn, "c_concat": [image_cond_in[a:b]]})
+ x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
- x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]})
+ x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:]))
denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps)
cfg_denoised_callback(denoised_params)
--
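Most of the sampler changes above reduce to one dispatch: unCLIP models (conditioning key "crossattn-adm") take their image conditioning as a bare c_adm tensor whose unconditional branch is all zeros, while inpainting-style models keep the c_concat list form. A toy recreation of the make_condition_dict selection from CFGDenoiser (torch only; tensor shapes are illustrative):

    import torch

    def get_make_condition_dict(conditioning_key):
        if conditioning_key == "crossattn-adm":  # unCLIP variations models
            return lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm}
        # inpainting-style models concatenate the image conditioning instead
        return lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]}

    cond = [torch.zeros(2, 77, 1024)]     # text conditioning
    image_cond = torch.zeros(2, 2048)     # CLIP image embed + noise level embed
    make = get_make_condition_dict("crossattn-adm")
    print(make(cond, image_cond).keys())  # dict_keys(['c_crossattn', 'c_adm'])
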
From 1f08600345298fac0bcb66cc215a81875a84d7b9 Mon Sep 17 00:00:00 2001
From: MrCheeze
Date: Sun, 26 Mar 2023 16:55:29 -0400
Subject: overwrite xformers in the unclip model config if not available
---
modules/sd_models.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index c1a80d82..e741470a 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -383,6 +383,9 @@ def repair_config(sd_config):
elif shared.cmd_opts.upcast_sampling:
sd_config.model.params.unet_config.params.use_fp16 = True
+ if getattr(sd_config.model.params.first_stage_config.params.ddconfig, "attn_type", None) == "vanilla-xformers" and not shared.xformers_available:
+ sd_config.model.params.first_stage_config.params.ddconfig.attn_type = "vanilla"
+
# For UnCLIP-L, override the hardcoded karlo directory
if hasattr(sd_config.model.params, "noise_aug_config") and hasattr(sd_config.model.params.noise_aug_config.params, "clip_stats_path"):
karlo_path = os.path.join(paths.models_path, 'karlo')
--
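For context, this check mutates the loaded model config in place before the model is built. A sketch of the same repair step, assuming an OmegaConf-style config like the ones the ldm YAMLs load into (xformers_available stands in for shared.xformers_available):

    from omegaconf import OmegaConf

    xformers_available = False  # stand-in for shared.xformers_available
    sd_config = OmegaConf.create({"model": {"params": {"first_stage_config": {
        "params": {"ddconfig": {"attn_type": "vanilla-xformers"}}}}}})

    ddconfig = sd_config.model.params.first_stage_config.params.ddconfig
    if getattr(ddconfig, "attn_type", None) == "vanilla-xformers" and not xformers_available:
        ddconfig.attn_type = "vanilla"  # fall back to the plain-attention VAE blocks

    print(ddconfig.attn_type)  # vanilla
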
From 77f9db3b080fafbc39c1b188777a93b5a1ab0f9e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 27 Mar 2023 12:59:12 +0300
Subject: serve css as independent files
---
modules/ui.py | 74 +++++++++++++++++++++++++++++++++--------------------------
1 file changed, 41 insertions(+), 33 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index af8546c2..eb5fcd3f 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -70,17 +70,6 @@ def gr_show(visible=True):
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
-css_hide_progressbar = """
-.wrap .m-12 svg { display:none!important; }
-.wrap .m-12::before { content:"Loading..." }
-.wrap .z-20 svg { display:none!important; }
-.wrap .z-20::before { content:"Loading..." }
-.wrap.cover-bg .z-20::before { content:"" }
-.progress-bar { display:none!important; }
-.meta-text { display:none!important; }
-.meta-text-center { display:none!important; }
-"""
-
# Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️
@@ -1566,22 +1555,6 @@ def create_ui():
(train_interface, "Train", "ti"),
]
- css = ""
-
- for cssfile in modules.scripts.list_files_with_name("style.css"):
- if not os.path.isfile(cssfile):
- continue
-
- with open(cssfile, "r", encoding="utf8") as file:
- css += file.read() + "\n"
-
- if os.path.exists(os.path.join(data_path, "user.css")):
- with open(os.path.join(data_path, "user.css"), "r", encoding="utf8") as file:
- css += file.read() + "\n"
-
- if not cmd_opts.no_progressbar_hiding:
- css += css_hide_progressbar
-
interfaces += script_callbacks.ui_tabs_callback()
interfaces += [(settings_interface, "Settings", "settings")]
@@ -1592,7 +1565,7 @@ def create_ui():
for _interface, label, _ifid in interfaces:
shared.tab_names.append(label)
- with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
+ with gr.Blocks(analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Row(elem_id="quicksettings", variant="compact"):
for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
component = create_setting_component(k, is_quicksettings=True)
@@ -1777,25 +1750,60 @@ def create_ui():
return demo
-def reload_javascript():
+def webpath(fn):
+ if fn.startswith(script_path):
+ web_path = os.path.relpath(fn, script_path).replace('\\', '/')
+ else:
+ web_path = os.path.abspath(fn)
+
+ return f'file={web_path}?{os.path.getmtime(fn)}'
+
+
+def javascript_html():
script_js = os.path.join(script_path, "script.js")
- head = f'<script type="text/javascript" src="file={os.path.abspath(script_js)}?{os.path.getmtime(script_js)}"></script>\n'
+ head = f'<script type="text/javascript" src="{webpath(script_js)}"></script>\n'
inline = f"{localization.localization_js(shared.opts.localization)};"
if cmd_opts.theme is not None:
inline += f"set_theme('{cmd_opts.theme}');"
for script in modules.scripts.list_scripts("javascript", ".js"):
- head += f'<script type="text/javascript" src="file={script.path}?{os.path.getmtime(script.path)}"></script>\n'
+ head += f'<script type="text/javascript" src="{webpath(script.path)}"></script>\n'
for script in modules.scripts.list_scripts("javascript", ".mjs"):
- head += f'<script type="module" src="file={script.path}?{os.path.getmtime(script.path)}"></script>\n'
+ head += f'<script type="module" src="{webpath(script.path)}"></script>\n'
head += f'<script type="text/javascript">{inline}</script>\n'
+ return head
+
+
+def css_html():
+ head = ""
+
+ def stylesheet(fn):
+ return f'<link rel="stylesheet" property="stylesheet" href="{webpath(fn)}">'
+
+ for cssfile in modules.scripts.list_files_with_name("style.css"):
+ if not os.path.isfile(cssfile):
+ continue
+
+ head += stylesheet(cssfile)
+
+ if os.path.exists(os.path.join(data_path, "user.css")):
+ head += stylesheet(os.path.join(data_path, "user.css"))
+
+ return head
+
+
+def reload_javascript():
+ js = javascript_html()
+ css = css_html()
+
def template_response(*args, **kwargs):
res = shared.GradioTemplateResponseOriginal(*args, **kwargs)
- res.body = res.body.replace(b'</head>', f'{head}</head>'.encode("utf8"))
+ res.body = res.body.replace(b'</head>', f'{js}</head>'.encode("utf8"))
+ res.body = res.body.replace(b'</body>', f'{css}</body>'.encode("utf8"))
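The injection done by template_response is plain byte replacement on the rendered page: the collected script tags are spliced in before the closing head tag and the stylesheet links before the closing body tag. A toy demonstration of the mechanism without Gradio:

    js = '<script type="text/javascript" src="file=script.js?123"></script>'
    css = '<link rel="stylesheet" property="stylesheet" href="file=style.css?123">'

    body = b"<html><head></head><body>hello</body></html>"
    body = body.replace(b"</head>", f"{js}</head>".encode("utf8"))
    body = body.replace(b"</body>", f"{css}</body>".encode("utf8"))
    print(body.decode("utf8"))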