67 files changed, 3135 insertions, 2289 deletions
diff --git a/.eslintrc.js b/.eslintrc.js
index f33aca09..e3b4fb76 100644
--- a/.eslintrc.js
+++ b/.eslintrc.js
@@ -87,5 +87,9 @@ module.exports = {
modalNextImage: "readonly",
// token-counters.js
setupTokenCounters: "readonly",
+ // localStorage.js
+ localSet: "readonly",
+ localGet: "readonly",
+ localRemove: "readonly"
}
};
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -115,7 +115,7 @@ Alternatively, use online services (like Google Colab):
1. Install the dependencies:
```bash
# Debian-based:
-sudo apt install wget git python3 python3-venv
+sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0
# Red Hat-based:
sudo dnf install wget git python3
# Arch-based:
@@ -123,7 +123,7 @@
sudo pacman -S wget git python3
```
2. Navigate to the directory you would like the webui to be installed and execute the following command:
```bash
-bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh)
+wget -q https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh
```
3. Run `webui.sh`.
4. Check `webui-user.sh` for options.
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 17cbe1bb..bc722e90 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -195,6 +195,15 @@ def load_network(name, network_on_disk):
return net
+def purge_networks_from_memory():
+ while len(networks_in_memory) > shared.opts.lora_in_memory_limit and len(networks_in_memory) > 0:
+ name = next(iter(networks_in_memory))
+ networks_in_memory.pop(name, None)
+
+ devices.torch_gc()
+
+
+
def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
already_loaded = {}
@@ -212,15 +221,19 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
failed_to_load_networks = []
- for i, name in enumerate(names):
+ for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)):
net = already_loaded.get(name, None)
- network_on_disk = networks_on_disk[i]
-
if network_on_disk is not None:
+ if net is None:
+ net = networks_in_memory.get(name)
+
if net is None or os.path.getmtime(network_on_disk.filename) > net.mtime:
try:
net = load_network(name, network_on_disk)
+
+ networks_in_memory.pop(name, None)
+ networks_in_memory[name] = net
except Exception as e:
errors.display(e, f"loading network {network_on_disk.filename}")
continue
@@ -242,6 +255,8 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
if failed_to_load_networks:
sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks))
+ purge_networks_from_memory()
+
def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
weights_backup = getattr(self, "network_weights_backup", None)
@@ -462,6 +477,7 @@ def infotext_pasted(infotext, params):
available_networks = {}
available_network_aliases = {}
loaded_networks = []
+networks_in_memory = {}
available_network_hash_lookup = {}
forbidden_network_aliases = {}
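The new `networks_in_memory` cache works because Python dicts preserve insertion order: `purge_networks_from_memory()` evicts the key returned by `next(iter(...))`, i.e. the oldest entry, and `load_networks()` pops and re-inserts a name on every load so recently used networks migrate to the end. A minimal standalone sketch of that eviction policy (`limit` is a hypothetical stand-in for `shared.opts.lora_in_memory_limit`):

```python
# LRU-style eviction as used by purge_networks_from_memory() above;
# `limit` stands in for shared.opts.lora_in_memory_limit.
networks_in_memory = {}
limit = 2

def remember(name, net):
    networks_in_memory.pop(name, None)  # re-inserting moves the key to the end
    networks_in_memory[name] = net
    while len(networks_in_memory) > limit:
        oldest = next(iter(networks_in_memory))  # dicts iterate in insertion order
        networks_in_memory.pop(oldest, None)

remember("a", 1); remember("b", 2); remember("a", 1); remember("c", 3)
print(list(networks_in_memory))  # ['a', 'c'] -- 'b' was the least recently used
```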
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
index cd28afc9..6ab8b6e7 100644
--- a/extensions-builtin/Lora/scripts/lora_script.py
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -65,6 +65,7 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
"lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
"lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
"lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
+ "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}),
}))
@@ -121,3 +122,5 @@ def infotext_pasted(infotext, d):
script_callbacks.on_infotext_pasted(infotext_pasted)
+
+shared.opts.onchange("lora_in_memory_limit", networks.purge_networks_from_memory)
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 30199dcd..e7616b98 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -42,6 +42,11 @@ onUiLoaded(async() => {
}
}

+ // Detect whether the element has a horizontal scroll bar
+ function hasHorizontalScrollbar(element) {
+ return element.scrollWidth > element.clientWidth;
+ }
+
// Function for defining the "Ctrl", "Shift" and "Alt" keys
function isModifierKey(event, key) {
switch (key) {
@@ -201,7 +206,8 @@
canvas_hotkey_overlap: "KeyO",
canvas_disabled_functions: [],
canvas_show_tooltip: true,
- canvas_blur_prompt: false
+ canvas_auto_expand: true,
+ canvas_blur_prompt: false,
};

const functionMap = {
@@ -648,8 +654,32 @@ onUiLoaded(async() => {
mouseY = e.offsetY;
}

+ // Fit a long image into the screen, simulating the S (fullscreen) and R (reset zoom) hotkeys.
+ // If the image has a horizontal scroll bar, hide the element, expand the image to fullscreen,
+ // then reset the zoom so it fits, and show the element again once it is ready.
+ function autoExpand(e) {
+ const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
+ const isMainTab = activeElement === elementIDs.inpaint || activeElement === elementIDs.inpaintSketch || activeElement === elementIDs.sketch;
+
+ if (canvas && isMainTab) {
+ if (hasHorizontalScrollbar(targetElement)) {
+ targetElement.style.visibility = "hidden";
+ setTimeout(() => {
+ fitToScreen();
+ resetZoom();
+ targetElement.style.visibility = "visible";
+ }, 10);
+ }
+ }
+ }
+
targetElement.addEventListener("mousemove", getMousePosition);

+ // Apply auto expand if enabled
+ if (hotkeysConfig.canvas_auto_expand) {
+ targetElement.addEventListener("mousemove", autoExpand);
+ }
+
// Handle events only inside the targetElement
let isKeyDownHandlerAttached = false;
diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
index 380176ce..2d8d2d1c 100644
--- a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
+++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
@@ -9,6 +9,7 @@ shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), {
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"),
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"),
"canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
+ "canvas_auto_expand": shared.OptionInfo(True, "Automatically expand an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
"canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
"canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable functions that you don't use", gr.CheckboxGroup, {"choices": ["Zoom", "Adjust brush size", "Moving canvas", "Fullscreen", "Reset Zoom", "Overlap"]}),
}))
diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
index a05e10d8..588b64d2 100644
--- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -1,5 +1,7 @@
+import math
+
import gradio as gr
-from modules import scripts, shared, ui_components, ui_settings
+from modules import scripts, shared, ui_components, ui_settings, generation_parameters_copypaste
from modules.ui_components import FormColumn
@@ -19,18 +21,37 @@ class ExtraOptionsSection(scripts.Script):
def ui(self, is_img2img):
self.comps = []
self.setting_names = []
+ self.infotext_fields = []
+
+ mapping = {k: v for v, k in generation_parameters_copypaste.infotext_to_setting_name_mapping}
with gr.Blocks() as interface:
- with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group(), gr.Row():
- for setting_name in shared.opts.extra_options:
- with FormColumn():
- comp = ui_settings.create_setting_component(setting_name)
+ with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group():
+
+ row_count = math.ceil(len(shared.opts.extra_options) / shared.opts.extra_options_cols)
+
+ for row in range(row_count):
+ with gr.Row():
+ for col in range(shared.opts.extra_options_cols):
+ index = row * shared.opts.extra_options_cols + col
+ if index >= len(shared.opts.extra_options):
+ break
+
+ setting_name = shared.opts.extra_options[index]
- self.comps.append(comp)
- self.setting_names.append(setting_name)
+ with FormColumn():
+ comp = ui_settings.create_setting_component(setting_name)
+
+ self.comps.append(comp)
+ self.setting_names.append(setting_name)
+
+ setting_infotext_name = mapping.get(setting_name)
+ if setting_infotext_name is not None:
+ self.infotext_fields.append((comp, setting_infotext_name))
def get_settings_values():
- return [ui_settings.get_value_for_setting(key) for key in self.setting_names]
+ res = [ui_settings.get_value_for_setting(key) for key in self.setting_names]
+ return res[0] if len(res) == 1 else res
interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False)
@@ -43,6 +64,9 @@ class ExtraOptionsSection(scripts.Script):
shared.options_templates.update(shared.options_section(('ui', "User interface"), {
- "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_restart(),
- "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion")
+ "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_reload_ui(),
+ "extra_options_cols": shared.OptionInfo(1, "Options in main UI - number of columns", gr.Number, {"precision": 0}).needs_reload_ui(),
+ "extra_options_accordion": shared.OptionInfo(False, "Options in main UI - place into an accordion").needs_reload_ui()
}))
+
+
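The grid logic above lays out `shared.opts.extra_options` in rows of `extra_options_cols` columns, with `index = row * cols + col` walking the flat option list. A quick standalone illustration of the indexing (the option names are hypothetical):

```python
import math

# Hypothetical stand-in for shared.opts.extra_options.
extra_options = ["sd_vae", "CLIP_stop_at_last_layers", "eta_noise_seed_delta", "s_churn", "s_noise"]
cols = 2
rows = math.ceil(len(extra_options) / cols)  # 3 rows for 5 options in 2 columns

for row in range(rows):
    row_options = [extra_options[i] for i in (row * cols + c for c in range(cols)) if i < len(extra_options)]
    print(row, row_options)
# 0 ['sd_vae', 'CLIP_stop_at_last_layers']
# 1 ['eta_noise_seed_delta', 's_churn']
# 2 ['s_noise']
```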
diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js
index 5582a6e5..897ebeba 100644
--- a/javascript/extraNetworks.js
+++ b/javascript/extraNetworks.js
@@ -1,20 +1,38 @@
+function toggleCss(key, css, enable) {
+ var style = document.getElementById(key);
+ if (enable && !style) {
+ style = document.createElement('style');
+ style.id = key;
+ style.type = 'text/css';
+ document.head.appendChild(style);
+ }
+ if (style && !enable) {
+ document.head.removeChild(style);
+ }
+ if (style) {
+ style.innerHTML = '';
+ style.appendChild(document.createTextNode(css));
+ }
+}
+
function setupExtraNetworksForTab(tabname) {
gradioApp().querySelector('#' + tabname + '_extra_tabs').classList.add('extra-networks');

var tabs = gradioApp().querySelector('#' + tabname + '_extra_tabs > div');
- var search = gradioApp().querySelector('#' + tabname + '_extra_search textarea');
+ var searchDiv = gradioApp().getElementById(tabname + '_extra_search');
+ var search = searchDiv.querySelector('textarea');
var sort = gradioApp().getElementById(tabname + '_extra_sort');
var sortOrder = gradioApp().getElementById(tabname + '_extra_sortorder');
var refresh = gradioApp().getElementById(tabname + '_extra_refresh');
+ var showDirsDiv = gradioApp().getElementById(tabname + '_extra_show_dirs');
+ var showDirs = gradioApp().querySelector('#' + tabname + '_extra_show_dirs input');

- search.classList.add('search');
- sort.classList.add('sort');
- sortOrder.classList.add('sortorder');
sort.dataset.sortkey = 'sortDefault';
- tabs.appendChild(search);
+ tabs.appendChild(searchDiv);
tabs.appendChild(sort);
tabs.appendChild(sortOrder);
tabs.appendChild(refresh);
+ tabs.appendChild(showDirsDiv);

var applyFilter = function() {
var searchTerm = search.value.toLowerCase();
@@ -80,6 +98,15 @@ function setupExtraNetworksForTab(tabname) {
});

extraNetworksApplyFilter[tabname] = applyFilter;
+
+ var showDirsUpdate = function() {
+ var css = '#' + tabname + '_extra_tabs .extra-network-subdirs { display: none; }';
+ toggleCss(tabname + '_extra_show_dirs_style', css, !showDirs.checked);
+ localSet('extra-networks-show-dirs', showDirs.checked ? 1 : 0);
+ };
+ showDirs.checked = localGet('extra-networks-show-dirs', 1) == 1;
+ showDirs.addEventListener("change", showDirsUpdate);
+ showDirsUpdate();
}

function applyExtraNetworkFilter(tabname) {
@@ -179,7 +206,7 @@ function saveCardPreview(event, tabname, filename) {
}

function extraNetworksSearchButton(tabs_id, event) {
- var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea');
+ var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > label > textarea');
var button = event.target;
var text = button.classList.contains("search-all") ? "" : button.textContent.trim();
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 677e95c1..c21d396e 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -136,6 +136,11 @@ function setupImageForLightbox(e) {
var event = isFirefox ? 'mousedown' : 'click';

e.addEventListener(event, function(evt) {
+ if (evt.button == 1) {
+ open(evt.target.src);
+ evt.preventDefault();
+ return;
+ }
if (!opts.js_modal_lightbox || evt.button != 0) return;

modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed);
diff --git a/javascript/localStorage.js b/javascript/localStorage.js
new file mode 100644
index 00000000..dc1a36c3
--- /dev/null
+++ b/javascript/localStorage.js
@@ -0,0 +1,26 @@
+
+function localSet(k, v) {
+ try {
+ localStorage.setItem(k, v);
+ } catch (e) {
+ console.warn(`Failed to save ${k} to localStorage: ${e}`);
+ }
+}
+
+function localGet(k, def) {
+ try {
+ return localStorage.getItem(k);
+ } catch (e) {
+ console.warn(`Failed to load ${k} from localStorage: ${e}`);
+ }
+
+ return def;
+}
+
+function localRemove(k) {
+ try {
+ return localStorage.removeItem(k);
+ } catch (e) {
+ console.warn(`Failed to remove ${k} from localStorage: ${e}`);
+ }
+}
diff --git a/javascript/ui.js b/javascript/ui.js
index abf23a78..bade3089 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -152,15 +152,11 @@ function submit() {
showSubmitButtons('txt2img', false);

var id = randomId();
- try {
- localStorage.setItem("txt2img_task_id", id);
- } catch (e) {
- console.warn(`Failed to save txt2img task id to localStorage: ${e}`);
- }
+ localSet("txt2img_task_id", id);

requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() {
showSubmitButtons('txt2img', true);
- localStorage.removeItem("txt2img_task_id");
+ localRemove("txt2img_task_id");
showRestoreProgressButton('txt2img', false);
});

@@ -175,15 +171,11 @@ function submit_img2img() {
showSubmitButtons('img2img', false);

var id = randomId();
- try {
- localStorage.setItem("img2img_task_id", id);
- } catch (e) {
- console.warn(`Failed to save img2img task id to localStorage: ${e}`);
- }
+ localSet("img2img_task_id", id);

requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() {
showSubmitButtons('img2img', true);
- localStorage.removeItem("img2img_task_id");
+ localRemove("img2img_task_id");
showRestoreProgressButton('img2img', false);
});

@@ -197,7 +189,7 @@ function submit_img2img() {
function restoreProgressTxt2img() {
showRestoreProgressButton("txt2img", false);
- var id = localStorage.getItem("txt2img_task_id");
+ var id = localGet("txt2img_task_id");

if (id) {
requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() {
@@ -211,7 +203,7 @@ function restoreProgressTxt2img() {
function restoreProgressImg2img() {
showRestoreProgressButton("img2img", false);

- var id = localStorage.getItem("img2img_task_id");
+ var id = localGet("img2img_task_id");

if (id) {
requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() {
@@ -224,8 +216,8 @@ function restoreProgressImg2img() {

onUiLoaded(function() {
- showRestoreProgressButton('txt2img', localStorage.getItem("txt2img_task_id"));
- showRestoreProgressButton('img2img', localStorage.getItem("img2img_task_id"));
+ showRestoreProgressButton('txt2img', localGet("txt2img_task_id"));
+ showRestoreProgressButton('img2img', localGet("img2img_task_id"));
});
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index 64f21e01..b0a11538 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -16,6 +16,7 @@ parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
parser.add_argument("--log-startup", action='store_true', help="launch.py argument: print a detailed log of what's happening at startup")
parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
+parser.add_argument("--loglevel", type=str, help="log level; one of: CRITICAL, ERROR, WARNING, INFO, DEBUG", default=None)
parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
diff --git a/modules/devices.py b/modules/devices.py
index 00a00b18..c01f0602 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -3,7 +3,7 @@
import contextlib
from functools import lru_cache

import torch
-from modules import errors, rng_philox
+from modules import errors, shared

if sys.platform == "darwin":
from modules import mac_specific
@@ -17,8 +17,6 @@ def has_mps() -> bool:

def get_cuda_device_string():
- from modules import shared
-
if shared.cmd_opts.device_id is not None:
return f"cuda:{shared.cmd_opts.device_id}"

@@ -40,8 +38,6 @@ def get_optimal_device():

def get_device_for(task):
- from modules import shared
-
if task in shared.cmd_opts.use_cpu:
return cpu

@@ -96,87 +92,7 @@ def cond_cast_float(input):
nv_rng = None


-def randn(seed, shape):
- """Generate a tensor with random numbers from a normal distribution using seed.
-
- Uses the seed parameter to set the global torch seed; to generate more with that seed, use randn_like/randn_without_seed."""
-
- from modules.shared import opts
-
- manual_seed(seed)
-
- if opts.randn_source == "NV":
- return torch.asarray(nv_rng.randn(shape), device=device)
-
- if opts.randn_source == "CPU" or device.type == 'mps':
- return torch.randn(shape, device=cpu).to(device)
-
- return torch.randn(shape, device=device)
-
-
-def randn_local(seed, shape):
- """Generate a tensor with random numbers from a normal distribution using seed.
-
- Does not change the global random number generator. You can only generate the seed's first tensor using this function."""
-
- from modules.shared import opts
-
- if opts.randn_source == "NV":
- rng = rng_philox.Generator(seed)
- return torch.asarray(rng.randn(shape), device=device)
-
- local_device = cpu if opts.randn_source == "CPU" or device.type == 'mps' else device
- local_generator = torch.Generator(local_device).manual_seed(int(seed))
- return torch.randn(shape, device=local_device, generator=local_generator).to(device)
-
-
-def randn_like(x):
- """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.
-
- Use either randn() or manual_seed() to initialize the generator."""
-
- from modules.shared import opts
-
- if opts.randn_source == "NV":
- return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype)
-
- if opts.randn_source == "CPU" or x.device.type == 'mps':
- return torch.randn_like(x, device=cpu).to(x.device)
-
- return torch.randn_like(x)
-
-
-def randn_without_seed(shape):
- """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.
-
- Use either randn() or manual_seed() to initialize the generator."""
-
- from modules.shared import opts
-
- if opts.randn_source == "NV":
- return torch.asarray(nv_rng.randn(shape), device=device)
-
- if opts.randn_source == "CPU" or device.type == 'mps':
- return torch.randn(shape, device=cpu).to(device)
-
- return torch.randn(shape, device=device)
-
-
-def manual_seed(seed):
- """Set up a global random number generator using the specified seed."""
- from modules.shared import opts
-
- if opts.randn_source == "NV":
- global nv_rng
- nv_rng = rng_philox.Generator(seed)
- return
-
- torch.manual_seed(seed)
-
-
def autocast(disable=False):
- from modules import shared
-
if disable:
return contextlib.nullcontext()

@@ -195,8 +111,6 @@ class NansException(Exception):

def test_for_nans(x, where):
- from modules import shared
-
if shared.cmd_opts.disable_nan_check:
return

@@ -236,3 +150,4 @@ def first_time_calculation():
x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
conv2d(x)
+
diff --git a/modules/extensions.py b/modules/extensions.py
index e4633af4..bf9a1878 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -1,7 +1,7 @@
import os
import threading
-from modules import shared, errors, cache
+from modules import shared, errors, cache, scripts
from modules.gitpython_hack import Repo
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401
@@ -90,8 +90,6 @@ class Extension:
self.have_info_from_repo = True
def list_files(self, subdir, extension):
- from modules import scripts
-
dirpath = os.path.join(self.path, subdir)
if not os.path.isdir(dirpath):
return []
diff --git a/modules/extra_networks.py b/modules/extra_networks.py
index 6ae07e91..fa28ac75 100644
--- a/modules/extra_networks.py
+++ b/modules/extra_networks.py
@@ -1,3 +1,5 @@
+import json
+import os
import re
from collections import defaultdict
@@ -177,3 +179,20 @@ def parse_prompts(prompts):
return res, extra_data
+
+def get_user_metadata(filename):
+ if filename is None:
+ return {}
+
+ basename, ext = os.path.splitext(filename)
+ metadata_filename = basename + '.json'
+
+ metadata = {}
+ try:
+ if os.path.isfile(metadata_filename):
+ with open(metadata_filename, "r", encoding="utf8") as file:
+ metadata = json.load(file)
+ except Exception as e:
+ errors.display(e, f"reading extra network user metadata from {metadata_filename}")
+
+ return metadata
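`get_user_metadata()` looks for a sidecar JSON next to a network file (`my_lora.safetensors` → `my_lora.json`) and returns an empty dict when the sidecar is missing or unreadable. A standalone sketch of the lookup (file names and metadata keys are hypothetical):

```python
import json
import os
import tempfile

# Hypothetical layout: a network file plus a sidecar .json with user metadata.
with tempfile.TemporaryDirectory() as d:
    network = os.path.join(d, "my_lora.safetensors")
    with open(os.path.join(d, "my_lora.json"), "w", encoding="utf8") as f:
        json.dump({"description": "test", "activation text": "my trigger"}, f)

    # Mirror the lookup above: swap the extension for .json, load it if present.
    metadata_filename = os.path.splitext(network)[0] + ".json"
    metadata = {}
    if os.path.isfile(metadata_filename):
        with open(metadata_filename, "r", encoding="utf8") as f:
            metadata = json.load(f)
    print(metadata.get("activation text"))  # -> my trigger
```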
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index a3448be9..d932c67d 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -6,7 +6,7 @@
import re
import gradio as gr
from modules.paths import data_path
-from modules import shared, ui_tempdir, script_callbacks
+from modules import shared, ui_tempdir, script_callbacks, processing
from PIL import Image
re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)'
@@ -198,7 +198,6 @@ def restore_old_hires_fix_params(res):
height = int(res.get("Size-2", 512))
if firstpass_width == 0 or firstpass_height == 0:
- from modules import processing
firstpass_width, firstpass_height = processing.old_hires_fix_first_pass_dimensions(width, height)
res['Size-1'] = firstpass_width
@@ -280,6 +279,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "Hires sampler" not in res:
res["Hires sampler"] = "Use same sampler"
+ if "Hires checkpoint" not in res:
+ res["Hires checkpoint"] = "Use same checkpoint"
+
if "Hires prompt" not in res:
res["Hires prompt"] = ""
@@ -304,6 +306,12 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "Schedule rho" not in res:
res["Schedule rho"] = 0
+ if "VAE Encoder" not in res:
+ res["VAE Encoder"] = "Full"
+
+ if "VAE Decoder" not in res:
+ res["VAE Decoder"] = "Full"
+
return res
@@ -319,6 +327,10 @@ infotext_to_setting_name_mapping = [
('Noise multiplier', 'initial_noise_multiplier'),
('Eta', 'eta_ancestral'),
('Eta DDIM', 'eta_ddim'),
+ ('Sigma churn', 's_churn'),
+ ('Sigma tmin', 's_tmin'),
+ ('Sigma tmax', 's_tmax'),
+ ('Sigma noise', 's_noise'),
('Discard penultimate sigma', 'always_discard_next_to_last_sigma'),
('UniPC variant', 'uni_pc_variant'),
('UniPC skip type', 'uni_pc_skip_type'),
@@ -329,6 +341,8 @@ infotext_to_setting_name_mapping = [
('RNG', 'randn_source'),
('NGMS', 's_min_uncond'),
('Pad conds', 'pad_cond_uncond'),
+ ('VAE Encoder', 'sd_vae_encode_method'),
+ ('VAE Decoder', 'sd_vae_decode_method'),
]
@@ -399,10 +413,15 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
return res
if override_settings_component is not None:
+ already_handled_fields = {key: 1 for _, key in paste_fields}
+
def paste_settings(params):
vals = {}
for param_name, setting_name in infotext_to_setting_name_mapping:
+ if param_name in already_handled_fields:
+ continue
+
v = params.get(param_name, None)
if v is None:
continue
diff --git a/modules/gradio_extensons.py b/modules/gradio_extensons.py
index 5af7fd8e..77c34c8b 100644
--- a/modules/gradio_extensons.py
+++ b/modules/gradio_extensons.py
@@ -1,6 +1,6 @@
import gradio as gr
-from modules import scripts
+from modules import scripts, ui_tempdir
def add_classes_to_gradio_component(comp):
"""
@@ -58,3 +58,5 @@ original_BlockContext_init = gr.blocks.BlockContext.__init__
gr.components.IOComponent.__init__ = IOComponent_init
gr.blocks.Block.get_config = Block_get_config
gr.blocks.BlockContext.__init__ = BlockContext_init
+
+ui_tempdir.install_ui_tempdir_override()
diff --git a/modules/images.py b/modules/images.py
index 38aa933d..019c1d60 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -21,8 +21,6 @@
from modules import sd_samplers, shared, script_callbacks, errors
from modules.paths_internal import roboto_ttf_file
from modules.shared import opts
-import modules.sd_vae as sd_vae
-
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
@@ -318,7 +316,7 @@ def resize_image(resize_mode, im, width, height, upscaler_name=None):
return res
-invalid_filename_chars = '<>:"/\\|?*\n'
+invalid_filename_chars = '<>:"/\\|?*\n\r\t'
invalid_filename_prefix = ' '
invalid_filename_postfix = ' .'
re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
@@ -342,16 +340,6 @@ def sanitize_filename_part(text, replace_spaces=True):
class FilenameGenerator:
- def get_vae_filename(self): #get the name of the VAE file.
- if sd_vae.loaded_vae_file is None:
- return "NoneType"
- file_name = os.path.basename(sd_vae.loaded_vae_file)
- split_file_name = file_name.split('.')
- if len(split_file_name) > 1 and split_file_name[0] == '':
- return split_file_name[1] # if the first character of the filename is "." then [1] is obtained.
- else:
- return split_file_name[0]
-
replacements = {
'seed': lambda self: self.seed if self.seed is not None else '',
'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0],
@@ -391,6 +379,22 @@ class FilenameGenerator:
self.image = image
self.zip = zip
+ def get_vae_filename(self):
+ """Get the name of the VAE file."""
+
+ import modules.sd_vae as sd_vae
+
+ if sd_vae.loaded_vae_file is None:
+ return "NoneType"
+
+ file_name = os.path.basename(sd_vae.loaded_vae_file)
+ split_file_name = file_name.split('.')
+ if len(split_file_name) > 1 and split_file_name[0] == '':
+ return split_file_name[1] # a filename like ".vae.pt" splits to ['', 'vae', 'pt'], so [1] is the usable name
+ else:
+ return split_file_name[0]
+
+
def hasprompt(self, *args):
lower = self.prompt.lower()
if self.p is None or self.prompt is None:
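To make the split logic in `get_vae_filename` above concrete, here is the same string handling applied to two hypothetical VAE file names:

```python
import os

# Hypothetical VAE paths illustrating the split logic in get_vae_filename.
for path in ["/models/VAE/my_vae.safetensors", "/models/VAE/.vae.pt"]:
    parts = os.path.basename(path).split('.')
    # A leading dot yields an empty first element, so fall back to parts[1].
    name = parts[1] if len(parts) > 1 and parts[0] == '' else parts[0]
    print(name)  # -> my_vae, then vae
```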
diff --git a/modules/img2img.py b/modules/img2img.py
index d8e1c534..e06ac1d6 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -6,7 +6,7 @@
import numpy as np
from PIL import Image, ImageOps, ImageFilter, ImageEnhance, UnidentifiedImageError
import gradio as gr
-from modules import sd_samplers, images as imgutil
+from modules import images as imgutil
from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state
@@ -116,7 +116,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False
process_images(p)
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
override_settings = create_override_settings_dict(override_settings_texts)
is_batch = mode == 5
@@ -172,7 +172,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles
seed_resize_from_h=seed_resize_from_h,
seed_resize_from_w=seed_resize_from_w,
seed_enable_extras=seed_enable_extras,
- sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name,
+ sampler_name=sampler_name,
batch_size=batch_size,
n_iter=n_iter,
steps=steps,
diff --git a/modules/initialize.py b/modules/initialize.py
new file mode 100644
index 00000000..f24f7637
--- /dev/null
+++ b/modules/initialize.py
@@ -0,0 +1,168 @@
+import importlib
+import logging
+import sys
+import warnings
+from threading import Thread
+
+from modules.timer import startup_timer
+
+
+def imports():
+ logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh...
+ logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
+
+ import torch # noqa: F401
+ startup_timer.record("import torch")
+ import pytorch_lightning # noqa: F401
+ startup_timer.record("import torch")
+ warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")
+
+ import gradio # noqa: F401
+ startup_timer.record("import gradio")
+
+ from modules import paths, timer, import_hook, errors # noqa: F401
+ startup_timer.record("setup paths")
+
+ import ldm.modules.encoders.modules # noqa: F401
+ startup_timer.record("import ldm")
+
+ import sgm.modules.encoders.modules # noqa: F401
+ startup_timer.record("import sgm")
+
+ from modules import shared_init
+ shared_init.initialize()
+ startup_timer.record("initialize shared")
+
+ from modules import processing, gradio_extensons, ui # noqa: F401
+ startup_timer.record("other imports")
+
+
+def check_versions():
+ from modules.shared_cmd_options import cmd_opts
+
+ if not cmd_opts.skip_version_check:
+ from modules import errors
+ errors.check_versions()
+
+
+def initialize():
+ from modules import initialize_util
+ initialize_util.fix_torch_version()
+ initialize_util.fix_asyncio_event_loop_policy()
+ initialize_util.validate_tls_options()
+ initialize_util.configure_sigint_handler()
+ initialize_util.configure_opts_onchange()
+
+ from modules import modelloader
+ modelloader.cleanup_models()
+
+ from modules import sd_models
+ sd_models.setup_model()
+ startup_timer.record("setup SD model")
+
+ from modules.shared_cmd_options import cmd_opts
+
+ from modules import codeformer_model
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision.transforms.functional_tensor")
+ codeformer_model.setup_model(cmd_opts.codeformer_models_path)
+ startup_timer.record("setup codeformer")
+
+ from modules import gfpgan_model
+ gfpgan_model.setup_model(cmd_opts.gfpgan_models_path)
+ startup_timer.record("setup gfpgan")
+
+ initialize_rest(reload_script_modules=False)
+
+
+def initialize_rest(*, reload_script_modules=False):
+ """
+ Called both from initialize() and when reloading the webui.
+ """
+ from modules.shared_cmd_options import cmd_opts
+
+ from modules import sd_samplers
+ sd_samplers.set_samplers()
+ startup_timer.record("set samplers")
+
+ from modules import extensions
+ extensions.list_extensions()
+ startup_timer.record("list extensions")
+
+ from modules import initialize_util
+ initialize_util.restore_config_state_file()
+ startup_timer.record("restore config state file")
+
+ from modules import shared, upscaler, scripts
+ if cmd_opts.ui_debug_mode:
+ shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
+ scripts.load_scripts()
+ return
+
+ from modules import sd_models
+ sd_models.list_models()
+ startup_timer.record("list SD models")
+
+ from modules import localization
+ localization.list_localizations(cmd_opts.localizations_dir)
+ startup_timer.record("list localizations")
+
+ with startup_timer.subcategory("load scripts"):
+ scripts.load_scripts()
+
+ if reload_script_modules:
+ for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
+ importlib.reload(module)
+ startup_timer.record("reload script modules")
+
+ from modules import modelloader
+ modelloader.load_upscalers()
+ startup_timer.record("load upscalers")
+
+ from modules import sd_vae
+ sd_vae.refresh_vae_list()
+ startup_timer.record("refresh VAE")
+
+ from modules import textual_inversion
+ textual_inversion.textual_inversion.list_textual_inversion_templates()
+ startup_timer.record("refresh textual inversion templates")
+
+ from modules import script_callbacks, sd_hijack_optimizations, sd_hijack
+ script_callbacks.on_list_optimizers(sd_hijack_optimizations.list_optimizers)
+ sd_hijack.list_optimizers()
+ startup_timer.record("scripts list_optimizers")
+
+ from modules import sd_unet
+ sd_unet.list_unets()
+ startup_timer.record("scripts list_unets")
+
+ def load_model():
+ """
+ Accesses shared.sd_model property to load model.
+ After it's available, if it has been loaded before this access by some extension,
+ its optimization may be None because the list of optimizers has not yet been filled
+ by that time, so we apply optimization again.
+ """
+
+ shared.sd_model # noqa: B018
+
+ if sd_hijack.current_optimizer is None:
+ sd_hijack.apply_optimizations()
+
+ from modules import devices
+ devices.first_time_calculation()
+
+ Thread(target=load_model).start()
+
+ from modules import shared_items
+ shared_items.reload_hypernetworks()
+ startup_timer.record("reload hypernetworks")
+
+ from modules import ui_extra_networks
+ ui_extra_networks.initialize()
+ ui_extra_networks.register_default_pages()
+
+ from modules import extra_networks
+ extra_networks.initialize()
+ extra_networks.register_default_extra_networks()
+ startup_timer.record("initialize extra networks")
diff --git a/modules/initialize_util.py b/modules/initialize_util.py
new file mode 100644
index 00000000..d8370576
--- /dev/null
+++ b/modules/initialize_util.py
@@ -0,0 +1,183 @@
+import json
+import os
+import signal
+import sys
+import re
+
+from modules.timer import startup_timer
+
+
+def gradio_server_name():
+ from modules.shared_cmd_options import cmd_opts
+
+ if cmd_opts.server_name:
+ return cmd_opts.server_name
+ else:
+ return "0.0.0.0" if cmd_opts.listen else None
+
+
+def fix_torch_version():
+ import torch
+
+ # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
+ if ".dev" in torch.__version__ or "+git" in torch.__version__:
+ torch.__long_version__ = torch.__version__
+ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
+
+
+def fix_asyncio_event_loop_policy():
+ """
+ The default `asyncio` event loop policy only automatically creates
+ event loops in the main threads. Other threads must create event
+ loops explicitly or `asyncio.get_event_loop` (and therefore
+ `.IOLoop.current`) will fail. Installing this policy allows event
+ loops to be created automatically on any thread, matching the
+ behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
+ """
+
+ import asyncio
+
+ if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
+ # "Any thread" and "selector" should be orthogonal, but there's not a clean
+ # interface for composing policies so pick the right base.
+ _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore
+ else:
+ _BasePolicy = asyncio.DefaultEventLoopPolicy
+
+ class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore
+ """Event loop policy that allows loop creation on any thread.
+ Usage::
+
+ asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
+ """
+
+ def get_event_loop(self) -> asyncio.AbstractEventLoop:
+ try:
+ return super().get_event_loop()
+ except (RuntimeError, AssertionError):
+ # This was an AssertionError in python 3.4.2 (which ships with debian jessie)
+ # and changed to a RuntimeError in 3.4.3.
+ # "There is no current event loop in thread %r"
+ loop = self.new_event_loop()
+ self.set_event_loop(loop)
+ return loop
+
+ asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
+
+
+def restore_config_state_file():
+ from modules import shared, config_states
+
+ config_state_file = shared.opts.restore_config_state_file
+ if config_state_file == "":
+ return
+
+ shared.opts.restore_config_state_file = ""
+ shared.opts.save(shared.config_filename)
+
+ if os.path.isfile(config_state_file):
+ print(f"*** About to restore extension state from file: {config_state_file}")
+ with open(config_state_file, "r", encoding="utf-8") as f:
+ config_state = json.load(f)
+ config_states.restore_extension_config(config_state)
+ startup_timer.record("restore extension config")
+ elif config_state_file:
+ print(f"!!! Config state backup not found: {config_state_file}")
+
+
+def validate_tls_options():
+ from modules.shared_cmd_options import cmd_opts
+
+ if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile):
+ return
+
+ try:
+ if not os.path.exists(cmd_opts.tls_keyfile):
+ print("Invalid path to TLS keyfile given")
+ if not os.path.exists(cmd_opts.tls_certfile):
+ print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
+ except TypeError:
+ cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
+ print("TLS setup invalid, running webui without TLS")
+ else:
+ print("Running with TLS")
+ startup_timer.record("TLS")
+
+
+def get_gradio_auth_creds():
+ """
+ Convert the gradio_auth and gradio_auth_path commandline arguments into
+ an iterable of (username, password) tuples.
+ """
+ from modules.shared_cmd_options import cmd_opts
+
+ def process_credential_line(s):
+ s = s.strip()
+ if not s:
+ return None
+ return tuple(s.split(':', 1))
+
+ if cmd_opts.gradio_auth:
+ for cred in cmd_opts.gradio_auth.split(','):
+ cred = process_credential_line(cred)
+ if cred:
+ yield cred
+
+ if cmd_opts.gradio_auth_path:
+ with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file:
+ for line in file.readlines():
+ for cred in line.strip().split(','):
+ cred = process_credential_line(cred)
+ if cred:
+ yield cred
+
+
+def configure_sigint_handler():
+ # make the program just exit at ctrl+c without waiting for anything
+ def sigint_handler(sig, frame):
+ print(f'Interrupted with signal {sig} in {frame}')
+ os._exit(0)
+
+ if not os.environ.get("COVERAGE_RUN"):
+ # Don't install the immediate-quit handler when running under coverage,
+ # as then the coverage report won't be generated.
+ signal.signal(signal.SIGINT, sigint_handler)
+
+
+def configure_opts_onchange():
+ from modules import shared, sd_models, sd_vae, ui_tempdir, sd_hijack
+ from modules.call_queue import wrap_queued_call
+
+ shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
+ shared.opts.onchange("sd_vae", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
+ shared.opts.onchange("sd_vae_overrides_per_model_preferences", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
+ shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
+ shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
+ shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
+ startup_timer.record("opts onchange")
+
+
+def setup_middleware(app):
+ from starlette.middleware.gzip import GZipMiddleware
+
+ app.middleware_stack = None # reset current middleware to allow modifying user provided list
+ app.add_middleware(GZipMiddleware, minimum_size=1000)
+ configure_cors_middleware(app)
+ app.build_middleware_stack() # rebuild middleware stack on-the-fly
+
+
+def configure_cors_middleware(app):
+ from starlette.middleware.cors import CORSMiddleware
+ from modules.shared_cmd_options import cmd_opts
+
+ cors_options = {
+ "allow_methods": ["*"],
+ "allow_headers": ["*"],
+ "allow_credentials": True,
+ }
+ if cmd_opts.cors_allow_origins:
+ cors_options["allow_origins"] = cmd_opts.cors_allow_origins.split(',')
+ if cmd_opts.cors_allow_origins_regex:
+ cors_options["allow_origin_regex"] = cmd_opts.cors_allow_origins_regex
+ app.add_middleware(CORSMiddleware, **cors_options)
+
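`get_gradio_auth_creds()` above accepts both a comma-separated `--gradio-auth` string and a file with `user:pass` pairs. A standalone check of the parsing rule (the credentials are made up):

```python
# Standalone version of the credential parsing used above (values are made up).
def parse_creds(s):
    for part in s.split(','):
        part = part.strip()
        if part:
            yield tuple(part.split(':', 1))

print(list(parse_creds("alice:secret, bob:hunter2,")))
# -> [('alice', 'secret'), ('bob', 'hunter2')] -- the empty trailing entry is skipped
```

Note that `split(':', 1)` splits only on the first colon, so any further colons stay inside the password.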
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index f77b577a..90c00dd2 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -1,4 +1,5 @@
# this script installs necessary requirements and launches the main program in webui.py
+import logging
+import logging
import re
import subprocess
import os
@@ -11,8 +12,10 @@
from functools import lru_cache
from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
from modules.timer import startup_timer
+from modules import logging_config
args, _ = cmd_args.parser.parse_known_args()
+logging_config.setup_logging(args.loglevel)
python = sys.executable
git = os.environ.get('GIT', "git")
@@ -139,6 +142,27 @@ def check_run_python(code: str) -> bool:
return result.returncode == 0
+def git_fix_workspace(dir, name):
+ run(f'"{git}" -C "{dir}" fetch --refetch --no-auto-gc', f"Fetching all contents for {name}", f"Couldn't fetch {name}", live=True)
+ run(f'"{git}" -C "{dir}" gc --aggressive --prune=now', f"Pruning {name}", f"Couldn't prune {name}", live=True)
+ return
+
+
+def run_git(dir, name, command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live, autofix=True):
+ try:
+ return run(f'"{git}" -C "{dir}" {command}', desc=desc, errdesc=errdesc, custom_env=custom_env, live=live)
+ except RuntimeError:
+ pass
+
+ if not autofix:
+ return None
+
+ print(f"{errdesc}, attempting autofix...")
+ git_fix_workspace(dir, name)
+
+ return run(f'"{git}" -C "{dir}" {command}', desc=desc, errdesc=errdesc, custom_env=custom_env, live=live)
+
+
def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
@@ -146,12 +170,14 @@ def git_clone(url, dir, name, commithash=None):
if commithash is None:
return
- current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}", live=False).strip()
+ current_hash = run_git(dir, name, 'rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}", live=False).strip()
if current_hash == commithash:
return
- run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
- run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True)
+ run_git(dir, name, 'fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
+
+ run_git(dir, name, f'checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True)
+
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True)
@@ -226,6 +252,8 @@ def run_extensions_installers(settings_file):
with startup_timer.subcategory("run extensions installers"):
for dirname_extension in list_extensions(settings_file):
+ logging.debug(f"Installing {dirname_extension}")
+
path = os.path.join(extensions_dir, dirname_extension)
if os.path.isdir(path):
diff --git a/modules/localization.py b/modules/localization.py
index e8f585da..c1320288 100644
--- a/modules/localization.py
+++ b/modules/localization.py
@@ -1,7 +1,7 @@
import json
import os
-from modules import errors
+from modules import errors, scripts
localizations = {}
@@ -16,7 +16,6 @@ def list_localizations(dirname):
localizations[fn] = os.path.join(dirname, file)
- from modules import scripts
for file in scripts.list_scripts("localizations", ".json"):
fn, ext = os.path.splitext(file.filename)
localizations[fn] = file.path
diff --git a/modules/logging_config.py b/modules/logging_config.py
new file mode 100644
index 00000000..7db23d4b
--- /dev/null
+++ b/modules/logging_config.py
@@ -0,0 +1,16 @@
+import os
+import logging
+
+
+def setup_logging(loglevel):
+ if loglevel is None:
+ loglevel = os.environ.get("SD_WEBUI_LOG_LEVEL")
+
+ if loglevel:
+ log_level = getattr(logging, loglevel.upper(), None) or logging.INFO
+ logging.basicConfig(
+ level=log_level,
+ format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
+ datefmt='%Y-%m-%d %H:%M:%S',
+ )
+
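`setup_logging()` gives the `--loglevel` argument precedence over the `SD_WEBUI_LOG_LEVEL` environment variable, and unknown level names fall back to INFO. A standalone sketch of the resolution order (the level names are examples):

```python
import logging
import os

def resolve_level(cli_loglevel):
    # Same precedence as setup_logging() above: the CLI flag wins, then the environment variable.
    loglevel = cli_loglevel or os.environ.get("SD_WEBUI_LOG_LEVEL")
    if not loglevel:
        return None  # leave logging unconfigured
    return getattr(logging, loglevel.upper(), None) or logging.INFO

print(resolve_level("debug"))    # 10 (logging.DEBUG)
print(resolve_level("verbose"))  # 20 -- unknown names fall back to INFO
```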
diff --git a/modules/lowvram.py b/modules/lowvram.py
index 3f830664..96f52b7b 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -15,6 +15,9 @@ def send_everything_to_cpu():

def setup_for_low_vram(sd_model, use_medvram):
+ if getattr(sd_model, 'lowvram', False):
+ return
+
sd_model.lowvram = True
parents = {}
diff --git a/modules/mac_specific.py b/modules/mac_specific.py
index 9ceb43ba..bce527cc 100644
--- a/modules/mac_specific.py
+++ b/modules/mac_specific.py
@@ -4,6 +4,7 @@
import torch
import platform
from modules.sd_hijack_utils import CondFunc
from packaging import version
+from modules import shared

log = logging.getLogger(__name__)

@@ -30,8 +31,7 @@ has_mps = check_for_mps()

def torch_mps_gc() -> None:
try:
- from modules.shared import state
- if state.current_latent is not None:
+ if shared.state.current_latent is not None:
log.debug("`current_latent` is set, skipping MPS garbage collection")
return
from torch.mps import empty_cache
diff --git a/modules/options.py b/modules/options.py
new file mode 100644
index 00000000..59cb75ec
--- /dev/null
+++ b/modules/options.py
@@ -0,0 +1,236 @@
+import json
+import sys
+
+import gradio as gr
+
+from modules import errors
+from modules.shared_cmd_options import cmd_opts
+
+
+class OptionInfo:
+ def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''):
+ self.default = default
+ self.label = label
+ self.component = component
+ self.component_args = component_args
+ self.onchange = onchange
+ self.section = section
+ self.refresh = refresh
+ self.do_not_save = False
+
+ self.comment_before = comment_before
+ """HTML text that will be added after label in UI"""
+
+ self.comment_after = comment_after
+ """HTML text that will be added before label in UI"""
+
+ def link(self, label, url):
+ self.comment_before += f"[<a href='{url}' target='_blank'>{label}</a>]"
+ return self
+
+ def js(self, label, js_func):
+ self.comment_before += f"[<a onclick='{js_func}(); return false'>{label}</a>]"
+ return self
+
+ def info(self, info):
+ self.comment_after += f"<span class='info'>({info})</span>"
+ return self
+
+ def html(self, html):
+ self.comment_after += html
+ return self
+
+ def needs_restart(self):
+ self.comment_after += " <span class='info'>(requires restart)</span>"
+ return self
+
+ def needs_reload_ui(self):
+ self.comment_after += " <span class='info'>(requires Reload UI)</span>"
+ return self
+
+
+class OptionHTML(OptionInfo):
+ def __init__(self, text):
+ super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs))
+
+ self.do_not_save = True
+
+
+def options_section(section_identifier, options_dict):
+ for v in options_dict.values():
+ v.section = section_identifier
+
+ return options_dict
+
+
+options_builtin_fields = {"data_labels", "data", "restricted_opts", "typemap"}
+
+
+class Options:
+ typemap = {int: float}
+
+ def __init__(self, data_labels, restricted_opts):
+ self.data_labels = data_labels
+ self.data = {k: v.default for k, v in self.data_labels.items()}
+ self.restricted_opts = restricted_opts
+
+ def __setattr__(self, key, value):
+ if key in options_builtin_fields:
+ return super(Options, self).__setattr__(key, value)
+
+ if self.data is not None:
+ if key in self.data or key in self.data_labels:
+ assert not cmd_opts.freeze_settings, "changing settings is disabled"
+
+ info = self.data_labels.get(key, None)
+ if info and info.do_not_save:
+ return
+
+ comp_args = info.component_args if info else None
+ if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
+
+ if cmd_opts.hide_ui_dir_config and key in self.restricted_opts:
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
+
+ self.data[key] = value
+ return
+
+ return super(Options, self).__setattr__(key, value)
+
+ def __getattr__(self, item):
+ if item in options_builtin_fields:
+ return super(Options, self).__getattribute__(item)
+
+ if self.data is not None:
+ if item in self.data:
+ return self.data[item]
+
+ if item in self.data_labels:
+ return self.data_labels[item].default
+
+ return super(Options, self).__getattribute__(item)
+
+ def set(self, key, value):
+ """sets an option and calls its onchange callback, returning True if the option changed and False otherwise"""
+
+ oldval = self.data.get(key, None)
+ if oldval == value:
+ return False
+
+ if self.data_labels[key].do_not_save:
+ return False
+
+ try:
+ setattr(self, key, value)
+ except RuntimeError:
+ return False
+
+ if self.data_labels[key].onchange is not None:
+ try:
+ self.data_labels[key].onchange()
+ except Exception as e:
+ errors.display(e, f"changing setting {key} to {value}")
+ setattr(self, key, oldval)
+ return False
+
+ return True
+
+ def get_default(self, key):
+ """returns the default value for the key"""
+
+ data_label = self.data_labels.get(key)
+ if data_label is None:
+ return None
+
+ return data_label.default
+
+ def save(self, filename):
+ assert not cmd_opts.freeze_settings, "saving settings is disabled"
+
+ with open(filename, "w", encoding="utf8") as file:
+ json.dump(self.data, file, indent=4)
+
+ def same_type(self, x, y):
+ if x is None or y is None:
+ return True
+
+ type_x = self.typemap.get(type(x), type(x))
+ type_y = self.typemap.get(type(y), type(y))
+
+ return type_x == type_y
+
+ def load(self, filename):
+ with open(filename, "r", encoding="utf8") as file:
+ self.data = json.load(file)
+
+ # 1.6.0 VAE defaults
+ if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None:
+ self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default')
+
+ # 1.1.1 quicksettings list migration
+ if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
+ self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
+
+ # 1.4.0 ui_reorder
+ if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data:
+ self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]
+
+ bad_settings = 0
+ for k, v in self.data.items():
+ info = self.data_labels.get(k, None)
+ if info is not None and not self.same_type(info.default, v):
+ print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
+ bad_settings += 1
+
+ if bad_settings > 0:
+ print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
+
+ def onchange(self, key, func, call=True):
+ item = self.data_labels.get(key)
+ item.onchange = func
+
+ if call:
+ func()
+
+ def dumpjson(self):
+ d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
+ d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
+ d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
+ return json.dumps(d)
+
+ def add_option(self, key, info):
+ self.data_labels[key] = info
+
+ def reorder(self):
+ """reorder settings so that all items related to section always go together"""
+
+ section_ids = {}
+ settings_items = self.data_labels.items()
+ for _, item in settings_items:
+ if item.section not in section_ids:
+ section_ids[item.section] = len(section_ids)
+
+ self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
+
+ def cast_value(self, key, value):
+ """casts an arbitrary to the same type as this setting's value with key
+ Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
+ """
+
+ if value is None:
+ return None
+
+ default_value = self.data_labels[key].default
+ if default_value is None:
+ default_value = getattr(self, key, None)
+ if default_value is None:
+ return None
+
+ expected_type = type(default_value)
+ if expected_type == bool and value == "False":
+ value = False
+ else:
+ value = expected_type(value)
+
+ return value
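`Options.set()` above returns False when the value is unchanged, when the option is marked `do_not_save`, or when the onchange callback raises, in which case the old value is restored. A toy sketch of that contract (hypothetical option name, not wired into the real class):

```python
# Toy illustration of the Options.set() contract described above
# (hypothetical option; not the real webui Options class).
opts_data = {"my_limit": 0}
onchange_callbacks = {}

def set_option(key, value):
    """Returns True only when the value actually changed and the callback succeeded."""
    old = opts_data.get(key)
    if old == value:
        return False
    opts_data[key] = value
    try:
        cb = onchange_callbacks.get(key)
        if cb:
            cb()
    except Exception:
        opts_data[key] = old  # restore the previous value on a failing callback
        return False
    return True

onchange_callbacks["my_limit"] = lambda: print("limit changed to", opts_data["my_limit"])
print(set_option("my_limit", 0))  # False -- unchanged
print(set_option("my_limit", 2))  # prints, then True
```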
diff --git a/modules/processing.py b/modules/processing.py
index 8086a2b0..6961b7b1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -14,8 +14,10 @@
from skimage import exposure
from typing import Any, Dict, List
import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng
+from modules.rng import slerp # noqa: F401
from modules.sd_hijack import model_hijack
+from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.paths as paths
@@ -30,7 +32,6 @@ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion from einops import repeat, rearrange
from blendmodes.blend import blendLayers, BlendType
-decode_first_stage = sd_samplers_common.decode_first_stage
# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
@@ -84,7 +85,7 @@ def txt2img_image_conditioning(sd_model, x, width, height): # The "masked-image" in this case will just be all zeros since the entire image is masked.
image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
- image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))
+ image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method))
# Add the fake full 1s mask to the first dimension.
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
@@ -110,7 +111,7 @@ class StableDiffusionProcessing: cached_uc = [None, None]
cached_c = [None, None]
- def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
+ def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
if sampler_index is not None:
print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
@@ -148,8 +149,8 @@ class StableDiffusionProcessing: self.s_min_uncond = s_min_uncond or opts.s_min_uncond
self.s_churn = s_churn or opts.s_churn
self.s_tmin = s_tmin or opts.s_tmin
- self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option
- self.s_noise = s_noise or opts.s_noise
+ self.s_tmax = (s_tmax if s_tmax is not None else opts.s_tmax) or float('inf')
+ self.s_noise = s_noise if s_noise is not None else opts.s_noise
self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
self.override_settings_restore_afterwards = override_settings_restore_afterwards
self.is_using_inpainting_conditioning = False
@@ -172,6 +173,8 @@ class StableDiffusionProcessing: self.iteration = 0
self.is_hr_pass = False
self.sampler = None
+ self.main_prompt = None
+ self.main_negative_prompt = None
self.prompts = None
self.negative_prompts = None
@@ -184,6 +187,7 @@ class StableDiffusionProcessing: self.cached_c = StableDiffusionProcessing.cached_c
self.uc = None
self.c = None
+ self.rng: rng.ImageRNG = None
self.user = None
@@ -203,7 +207,7 @@ class StableDiffusionProcessing: midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)
- conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
+ conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))
conditioning = torch.nn.functional.interpolate(
self.sd_model.depth_model(midas_in),
size=conditioning_image.shape[2:],
@@ -216,7 +220,7 @@ class StableDiffusionProcessing: return conditioning
def edit_image_conditioning(self, source_image):
- conditioning_image = self.sd_model.encode_first_stage(source_image).mode()
+ conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))
return conditioning_image
@@ -295,7 +299,7 @@ class StableDiffusionProcessing: self.sampler = None
self.c = None
self.uc = None
- if not opts.experimental_persistent_cond_cache:
+ if not opts.persistent_cond_cache:
StableDiffusionProcessing.cached_c = [None, None]
StableDiffusionProcessing.cached_uc = [None, None]
@@ -319,6 +323,24 @@ class StableDiffusionProcessing: self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts]
self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts]
+ self.main_prompt = self.all_prompts[0]
+ self.main_negative_prompt = self.all_negative_prompts[0]
+
+ def cached_params(self, required_prompts, steps, extra_network_data):
+ """Returns parameters that invalidate the cond cache if changed"""
+
+ return (
+ required_prompts,
+ steps,
+ opts.CLIP_stop_at_last_layers,
+ shared.sd_model.sd_checkpoint_info,
+ extra_network_data,
+ opts.sdxl_crop_left,
+ opts.sdxl_crop_top,
+ self.width,
+ self.height,
+ )
+
def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data):
"""
Returns the result of calling function(shared.sd_model, required_prompts, steps)
@@ -332,17 +354,7 @@ class StableDiffusionProcessing: caches is a list with items described above.
"""
- cached_params = (
- required_prompts,
- steps,
- opts.CLIP_stop_at_last_layers,
- shared.sd_model.sd_checkpoint_info,
- extra_network_data,
- opts.sdxl_crop_left,
- opts.sdxl_crop_top,
- self.width,
- self.height,
- )
+ cached_params = self.cached_params(required_prompts, steps, extra_network_data)
for cache in caches:
if cache[0] is not None and cached_params == cache[0]:
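The cache contract here is plain tuple equality: each cache is a two-element `[params, result]` list, so any change to the tuple returned by `cached_params` forces recomputation. A minimal sketch of the pattern, with `compute` as a hypothetical stand-in for the conditioning function:
```python
# Minimal sketch of the [params, result] cache convention.
def get_with_caching(compute, params, cache):
    if cache[0] is not None and cache[0] == params:
        return cache[1]  # parameters unchanged: reuse previous result
    result = compute(params)
    cache[0] = params
    cache[1] = result
    return result

cache = [None, None]
assert get_with_caching(sum, (1, 2, 3), cache) == 6  # computed
assert get_with_caching(sum, (1, 2, 3), cache) == 6  # served from cache
```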
@@ -368,6 +380,10 @@ class StableDiffusionProcessing: def parse_extra_network_prompts(self):
self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts)
+ def save_samples(self) -> bool:
+ """Returns whether generated images need to be written to disk"""
+ return opts.samples_save and not self.do_not_save_samples and (opts.save_incomplete_images or not state.interrupted and not state.skipped)
+
class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""):
@@ -461,86 +477,17 @@ class Processed: return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio
-# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
-def slerp(val, low, high):
- low_norm = low/torch.norm(low, dim=1, keepdim=True)
- high_norm = high/torch.norm(high, dim=1, keepdim=True)
- dot = (low_norm*high_norm).sum(1)
-
- if dot.mean() > 0.9995:
- return low * val + high * (1 - val)
-
- omega = torch.acos(dot)
- so = torch.sin(omega)
- res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
- return res
-
-
def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
- eta_noise_seed_delta = opts.eta_noise_seed_delta or 0
- xs = []
-
- # if we have multiple seeds, this means we are working with batch size>1; this then
- # enables the generation of additional tensors with noise that the sampler will use during its processing.
- # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
- # produce the same images as with two batches [100], [101].
- if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0):
- sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
- else:
- sampler_noises = None
-
- for i, seed in enumerate(seeds):
- noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8)
-
- subnoise = None
- if subseeds is not None and subseed_strength != 0:
- subseed = 0 if i >= len(subseeds) else subseeds[i]
-
- subnoise = devices.randn(subseed, noise_shape)
-
- # randn results depend on device; gpu and cpu get different results for same seed;
- # the way I see it, it's better to do this on CPU, so that everyone gets same result;
- # but the original script had it like this, so I do not dare change it for now because
- # it will break everyone's seeds.
- noise = devices.randn(seed, noise_shape)
-
- if subnoise is not None:
- noise = slerp(subseed_strength, noise, subnoise)
-
- if noise_shape != shape:
- x = devices.randn(seed, shape)
- dx = (shape[2] - noise_shape[2]) // 2
- dy = (shape[1] - noise_shape[1]) // 2
- w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
- h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
- tx = 0 if dx < 0 else dx
- ty = 0 if dy < 0 else dy
- dx = max(-dx, 0)
- dy = max(-dy, 0)
-
- x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w]
- noise = x
+ g = rng.ImageRNG(shape, seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w)
+ return g.next()
- if sampler_noises is not None:
- cnt = p.sampler.number_of_needed_noises(p)
- if eta_noise_seed_delta > 0:
- devices.manual_seed(seed + eta_noise_seed_delta)
-
- for j in range(cnt):
- sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))
-
- xs.append(noise)
-
- if sampler_noises is not None:
- p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]
-
- x = torch.stack(xs).to(shared.device)
- return x
+class DecodedSamples(list):
+ already_decoded = True
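`DecodedSamples` is a list subclass whose only job is to carry a marker attribute; `process_images_inner` later checks `getattr(samples_ddim, 'already_decoded', False)` to skip the VAE decode. A tiny sketch of the pattern, assuming the class above:
```python
# Marker-attribute pattern: plain lists (latents) report False,
# DecodedSamples instances (already-decoded images) report True.
latents = [0, 1]
decoded = DecodedSamples([0, 1])
assert getattr(latents, 'already_decoded', False) is False
assert getattr(decoded, 'already_decoded', False) is True
```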
def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
- samples = []
+ samples = DecodedSamples()
for i in range(batch.shape[0]):
sample = decode_first_stage(model, batch[i:i + 1])[0]
@@ -555,7 +502,7 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False): errors.print_error_explanation(
"A tensor with all NaNs was produced in VAE.\n"
"Web UI will now convert VAE into 32-bit float and retry.\n"
- "To disable this behavior, disable the 'Automaticlly revert VAE to 32-bit floats' setting.\n"
+ "To disable this behavior, disable the 'Automatically revert VAE to 32-bit floats' setting.\n"
"To always start with 32-bit VAE, use --no-half-vae commandline flag."
)
@@ -640,8 +587,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
- prompt_text = p.prompt if use_main_prompt else all_prompts[index]
- negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else ""
+ prompt_text = p.main_prompt if use_main_prompt else all_prompts[index]
+ negative_prompt_text = f"\nNegative prompt: {p.main_negative_prompt if use_main_prompt else all_negative_prompts[index]}" if all_negative_prompts[index] else ""
return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
@@ -751,6 +698,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
+ p.rng = rng.ImageRNG((opt_C, p.height // opt_f, p.width // opt_f), p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w)
+
if p.scripts is not None:
p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)
@@ -788,7 +737,14 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
- x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
+ if getattr(samples_ddim, 'already_decoded', False):
+ x_samples_ddim = samples_ddim
+ else:
+ if opts.sd_vae_decode_method != 'Full':
+ p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
+
+ x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
+
x_samples_ddim = torch.stack(x_samples_ddim).float()
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
@@ -812,6 +768,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: def infotext(index=0, use_main_prompt=False):
return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts)
+ save_samples = p.save_samples()
+
for i, x_sample in enumerate(x_samples_ddim):
p.batch_index = i
@@ -819,7 +777,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: x_sample = x_sample.astype(np.uint8)
if p.restore_faces:
- if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
+ if save_samples and opts.save_images_before_face_restoration:
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration")
devices.torch_gc()
@@ -833,16 +791,15 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: pp = scripts.PostprocessImageArgs(image)
p.scripts.postprocess_image(p, pp)
image = pp.image
-
if p.color_corrections is not None and i < len(p.color_corrections):
- if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
+ if save_samples and opts.save_images_before_color_correction:
image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction")
image = apply_color_correction(p.color_corrections[i], image)
image = apply_overlay(image, p.paste_to, i, p.overlay_images)
- if opts.samples_save and not p.do_not_save_samples:
+ if save_samples:
images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p)
text = infotext(i)
@@ -850,8 +807,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.enable_pnginfo:
image.info["parameters"] = text
output_images.append(image)
-
- if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]):
+ if save_samples and hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]):
image_mask = p.mask_for_overlay.convert('RGB')
image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
@@ -887,7 +843,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: grid.info["parameters"] = text
output_images.insert(0, grid)
index_of_first_image = 1
-
if opts.grid_save:
images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True)
@@ -930,7 +885,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): cached_hr_uc = [None, None]
cached_hr_c = [None, None]
- def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs):
+ def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_checkpoint_name: str = None, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs):
super().__init__(**kwargs)
self.enable_hr = enable_hr
self.denoising_strength = denoising_strength
@@ -941,11 +896,14 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_resize_y = hr_resize_y
self.hr_upscale_to_x = hr_resize_x
self.hr_upscale_to_y = hr_resize_y
+ self.hr_checkpoint_name = hr_checkpoint_name
+ self.hr_checkpoint_info = None
self.hr_sampler_name = hr_sampler_name
self.hr_prompt = hr_prompt
self.hr_negative_prompt = hr_negative_prompt
self.all_hr_prompts = None
self.all_hr_negative_prompts = None
+ self.latent_scale_mode = None
if firstphase_width != 0 or firstphase_height != 0:
self.hr_upscale_to_x = self.width
@@ -968,6 +926,14 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
+ if self.hr_checkpoint_name:
+ self.hr_checkpoint_info = sd_models.get_closet_checkpoint_match(self.hr_checkpoint_name)
+
+ if self.hr_checkpoint_info is None:
+ raise Exception(f'Could not find checkpoint with name {self.hr_checkpoint_name}')
+
+ self.extra_generation_params["Hires checkpoint"] = self.hr_checkpoint_info.short_title
+
if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
self.extra_generation_params["Hires sampler"] = self.hr_sampler_name
@@ -977,6 +943,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt):
self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt
+ self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
+ if self.enable_hr and self.latent_scale_mode is None:
+ if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
+ raise Exception(f"could not find upscaler named {self.hr_upscaler}")
+
if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
self.hr_resize_x = self.width
self.hr_resize_y = self.height
@@ -1015,14 +986,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f
- # special case: the user has chosen to do nothing
- if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height:
- self.enable_hr = False
- self.denoising_strength = None
- self.extra_generation_params.pop("Hires upscale", None)
- self.extra_generation_params.pop("Hires resize", None)
- return
-
if not state.processing_has_refined_job_count:
if state.job_count == -1:
state.job_count = self.n_iter
@@ -1040,17 +1003,32 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
- latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
- if self.enable_hr and latent_scale_mode is None:
- if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
- raise Exception(f"could not find upscaler named {self.hr_upscaler}")
-
- x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
+ x = self.rng.next()
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
+ del x
if not self.enable_hr:
return samples
+ if self.latent_scale_mode is None:
+ decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32)
+ else:
+ decoded_samples = None
+
+ current = shared.sd_model.sd_checkpoint_info
+ try:
+ if self.hr_checkpoint_info is not None:
+ self.sampler = None
+ sd_models.reload_model_weights(info=self.hr_checkpoint_info)
+ devices.torch_gc()
+
+ return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)
+ finally:
+ self.sampler = None
+ sd_models.reload_model_weights(info=current)
+ devices.torch_gc()
+
+ def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):
self.is_hr_pass = True
target_width = self.hr_upscale_to_x
@@ -1059,7 +1037,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def save_intermediate(image, index):
"""saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""
- if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
+ if not self.save_samples() or not opts.save_images_before_highres_fix:
return
if not isinstance(image, Image.Image):
@@ -1068,11 +1046,15 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, p=self, suffix="-before-highres-fix")
- if latent_scale_mode is not None:
+ img2img_sampler_name = self.hr_sampler_name or self.sampler_name
+
+ self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)
+
+ if self.latent_scale_mode is not None:
for i in range(samples.shape[0]):
save_intermediate(samples, i)
- samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])
+ samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=self.latent_scale_mode["mode"], antialias=self.latent_scale_mode["antialias"])
# Avoid making the inpainting conditioning unless necessary as
# this does need some extra compute to decode / encode the image again.
@@ -1081,7 +1063,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): else:
image_conditioning = self.txt2img_image_conditioning(samples)
else:
- decoded_samples = decode_first_stage(self.sd_model, samples)
lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
batch_images = []
@@ -1098,28 +1079,22 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): batch_images.append(image)
decoded_samples = torch.from_numpy(np.array(batch_images))
- decoded_samples = decoded_samples.to(shared.device)
- decoded_samples = 2. * decoded_samples - 1.
+ decoded_samples = decoded_samples.to(shared.device, dtype=devices.dtype_vae)
- samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
+ if opts.sd_vae_encode_method != 'Full':
+ self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
+ samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method))
image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)
shared.state.nextjob()
- img2img_sampler_name = self.hr_sampler_name or self.sampler_name
-
- if self.sampler_name in ['PLMS', 'UniPC']: # PLMS/UniPC do not support img2img so we just silently switch to DDIM
- img2img_sampler_name = 'DDIM'
-
- self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)
-
samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]
- noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)
+ self.rng = rng.ImageRNG(samples.shape[1:], self.seeds, subseeds=self.subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w)
+ noise = self.rng.next()
# GC now before running the next img2img to prevent running out of memory
- x = None
devices.torch_gc()
if not self.disable_extra_networks:
@@ -1138,15 +1113,17 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio())
+ decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)
+
self.is_hr_pass = False
- return samples
+ return decoded_samples
def close(self):
super().close()
self.hr_c = None
self.hr_uc = None
- if not opts.experimental_persistent_cond_cache:
+ if not opts.persistent_cond_cache:
StableDiffusionProcessingTxt2Img.cached_hr_uc = [None, None]
StableDiffusionProcessingTxt2Img.cached_hr_c = [None, None]
@@ -1179,8 +1156,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if self.hr_c is not None:
return
- self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data)
- self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data)
+ hr_prompts = prompt_parser.SdConditioning(self.hr_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y)
+ hr_negative_prompts = prompt_parser.SdConditioning(self.hr_negative_prompts, width=self.hr_upscale_to_x, height=self.hr_upscale_to_y, is_negative_prompt=True)
+
+ self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, self.steps * self.step_multiplier, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data)
+ self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, self.steps * self.step_multiplier, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data)
def setup_conds(self):
super().setup_conds()
@@ -1188,7 +1168,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.hr_uc = None
self.hr_c = None
- if self.enable_hr:
+ if self.enable_hr and self.hr_checkpoint_info is None:
if shared.opts.hires_fix_use_firstpass_conds:
self.calculate_hr_conds()
@@ -1339,10 +1319,12 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
image = torch.from_numpy(batch_images)
- image = 2. * image - 1.
image = image.to(shared.device, dtype=devices.dtype_vae)
- self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
+ if opts.sd_vae_encode_method != 'Full':
+ self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
+
+ self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)
devices.torch_gc()
if self.resize_mode == 3:
@@ -1368,7 +1350,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask)
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
- x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
+ x = self.rng.next()
if self.initial_noise_multiplier != 1.0:
self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 8169a459..32d214e3 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -20,7 +20,7 @@ prompt: (emphasized | scheduled | alternate | plain | WHITESPACE)* | "(" prompt ":" prompt ")"
| "[" prompt "]"
scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER [WHITESPACE] "]"
-alternate: "[" prompt ("|" prompt)+ "]"
+alternate: "[" prompt ("|" [prompt])+ "]"
WHITESPACE: /\s+/
plain: /([^\\\[\]():|]|\\.)+/
%import common.SIGNED_NUMBER -> NUMBER
@@ -53,6 +53,10 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): [[3, '((a][:b:c '], [10, '((a][:b:c d']]
>>> g("[a|(b:1.1)]")
[[1, 'a'], [2, '(b:1.1)'], [3, 'a'], [4, '(b:1.1)'], [5, 'a'], [6, '(b:1.1)'], [7, 'a'], [8, '(b:1.1)'], [9, 'a'], [10, '(b:1.1)']]
+ >>> g("[fe|]male")
+ [[1, 'female'], [2, 'male'], [3, 'female'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'female'], [8, 'male'], [9, 'female'], [10, 'male']]
+ >>> g("[fe|||]male")
+ [[1, 'female'], [2, 'male'], [3, 'male'], [4, 'male'], [5, 'female'], [6, 'male'], [7, 'male'], [8, 'male'], [9, 'female'], [10, 'male']]
"""
def collect_steps(steps, tree):
@@ -78,7 +82,8 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): before, after, _, when, _ = args
yield before or () if step <= when else after
def alternate(self, args):
- yield next(args[(step - 1)%len(args)])
+ args = ["" if not arg else arg for arg in args]
+ yield args[(step - 1) % len(args)]
def start(self, args):
def flatten(x):
if type(x) == str:
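The grammar change makes an empty alternate option legal, and the visitor substitutes `""` before the per-step modulo lookup, which is what the new `[fe|]male` doctests exercise. A standalone sketch of that indexing rule:
```python
# Per-step alternation rule: option i is active at step s when
# (s - 1) % len(options) == i, with "" standing in for omitted options.
def alternate_at(options, step):
    options = ["" if not opt else opt for opt in options]
    return options[(step - 1) % len(options)]

print([alternate_at(["fe", ""], s) + "male" for s in range(1, 5)])
# ['female', 'male', 'female', 'male']
```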
diff --git a/modules/rng.py b/modules/rng.py new file mode 100644 index 00000000..f927a318 --- /dev/null +++ b/modules/rng.py @@ -0,0 +1,170 @@ +import torch
+
+from modules import devices, rng_philox, shared
+
+
+def randn(seed, shape, generator=None):
+ """Generate a tensor with random numbers from a normal distribution using seed.
+
+ Uses the seed parameter to set the global torch seed; to generate more with that seed, use randn_like/randn_without_seed."""
+
+ manual_seed(seed)
+
+ if shared.opts.randn_source == "NV":
+ return torch.asarray((generator or nv_rng).randn(shape), device=devices.device)
+
+ if shared.opts.randn_source == "CPU" or devices.device.type == 'mps':
+ return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device)
+
+ return torch.randn(shape, device=devices.device, generator=generator)
+
+
+def randn_local(seed, shape):
+ """Generate a tensor with random numbers from a normal distribution using seed.
+
+ Does not change the global random number generator. You can only generate the seed's first tensor using this function."""
+
+ if shared.opts.randn_source == "NV":
+ rng = rng_philox.Generator(seed)
+ return torch.asarray(rng.randn(shape), device=devices.device)
+
+ local_device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device
+ local_generator = torch.Generator(local_device).manual_seed(int(seed))
+ return torch.randn(shape, device=local_device, generator=local_generator).to(devices.device)
+
+
+def randn_like(x):
+ """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator.
+
+ Use either randn() or manual_seed() to initialize the generator."""
+
+ if shared.opts.randn_source == "NV":
+ return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype)
+
+ if shared.opts.randn_source == "CPU" or x.device.type == 'mps':
+ return torch.randn_like(x, device=devices.cpu).to(x.device)
+
+ return torch.randn_like(x)
+
+
+def randn_without_seed(shape, generator=None):
+ """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator.
+
+ Use either randn() or manual_seed() to initialize the generator."""
+
+ if shared.opts.randn_source == "NV":
+ return torch.asarray((generator or nv_rng).randn(shape), device=devices.device)
+
+ if shared.opts.randn_source == "CPU" or devices.device.type == 'mps':
+ return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device)
+
+ return torch.randn(shape, device=devices.device, generator=generator)
+
+
+def manual_seed(seed):
+ """Set up a global random number generator using the specified seed."""
+
+ if shared.opts.randn_source == "NV":
+ global nv_rng
+ nv_rng = rng_philox.Generator(seed)
+ return
+
+ torch.manual_seed(seed)
+
+
+def create_generator(seed):
+ if shared.opts.randn_source == "NV":
+ return rng_philox.Generator(seed)
+
+ device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device
+ generator = torch.Generator(device).manual_seed(int(seed))
+ return generator
+
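The point of per-seed `torch.Generator` objects, rather than the single global torch seed, is that each image's noise stream stays reproducible no matter what else touches the global RNG. A plain-torch sketch of that property:
```python
import torch

# Two generators seeded identically produce identical streams...
g1 = torch.Generator("cpu").manual_seed(100)
g2 = torch.Generator("cpu").manual_seed(100)
a = torch.randn((4,), generator=g1)
torch.manual_seed(0)  # ...even if the global seed is changed in between
b = torch.randn((4,), generator=g2)
assert torch.equal(a, b)
```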
+
+# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
+def slerp(val, low, high):
+ low_norm = low/torch.norm(low, dim=1, keepdim=True)
+ high_norm = high/torch.norm(high, dim=1, keepdim=True)
+ dot = (low_norm*high_norm).sum(1)
+
+ if dot.mean() > 0.9995:
+ return low * val + high * (1 - val)
+
+ omega = torch.acos(dot)
+ so = torch.sin(omega)
+ res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
+ return res
+
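A quick endpoint check for `slerp`, runnable inside the webui environment (vectors chosen orthogonal so the small-angle lerp branch is not taken):
```python
import torch
from modules.rng import slerp  # the function defined above

low = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
high = torch.tensor([[0.0, 1.0, 0.0, 0.0]])

assert torch.allclose(slerp(0.0, low, high), low, atol=1e-6)   # val=0 -> low
assert torch.allclose(slerp(1.0, low, high), high, atol=1e-6)  # val=1 -> high
print(slerp(0.5, low, high))  # ~0.7071 in both components: halfway along the arc
```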
+
+class ImageRNG:
+ def __init__(self, shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0):
+ self.shape = shape
+ self.seeds = seeds
+ self.subseeds = subseeds
+ self.subseed_strength = subseed_strength
+ self.seed_resize_from_h = seed_resize_from_h
+ self.seed_resize_from_w = seed_resize_from_w
+
+ self.generators = [create_generator(seed) for seed in seeds]
+
+ self.is_first = True
+
+ def first(self):
+ noise_shape = self.shape if self.seed_resize_from_h <= 0 or self.seed_resize_from_w <= 0 else (self.shape[0], self.seed_resize_from_h // 8, self.seed_resize_from_w // 8)
+
+ xs = []
+
+ for i, (seed, generator) in enumerate(zip(self.seeds, self.generators)):
+ subnoise = None
+ if self.subseeds is not None and self.subseed_strength != 0:
+ subseed = 0 if i >= len(self.subseeds) else self.subseeds[i]
+ subnoise = randn(subseed, noise_shape)
+
+ if noise_shape != self.shape:
+ noise = randn(seed, noise_shape)
+ else:
+ noise = randn(seed, self.shape, generator=generator)
+
+ if subnoise is not None:
+ noise = slerp(self.subseed_strength, noise, subnoise)
+
+ if noise_shape != self.shape:
+ x = randn(seed, self.shape, generator=generator)
+ dx = (self.shape[2] - noise_shape[2]) // 2
+ dy = (self.shape[1] - noise_shape[1]) // 2
+ w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
+ h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
+ tx = 0 if dx < 0 else dx
+ ty = 0 if dy < 0 else dy
+ dx = max(-dx, 0)
+ dy = max(-dy, 0)
+
+ x[:, ty:ty + h, tx:tx + w] = noise[:, dy:dy + h, dx:dx + w]
+ noise = x
+
+ xs.append(noise)
+
+ eta_noise_seed_delta = shared.opts.eta_noise_seed_delta or 0
+ if eta_noise_seed_delta:
+ self.generators = [create_generator(seed + eta_noise_seed_delta) for seed in self.seeds]
+
+ return torch.stack(xs).to(shared.device)
+
+ def next(self):
+ if self.is_first:
+ self.is_first = False
+ return self.first()
+
+ xs = []
+ for generator in self.generators:
+ x = randn_without_seed(self.shape, generator=generator)
+ xs.append(x)
+
+ return torch.stack(xs).to(shared.device)
+
+
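A usage sketch for the class above, assuming the webui's `shared`/`devices` state is initialized; the shape is an illustrative SD latent shape (4 x H/8 x W/8):
```python
# Hypothetical usage; requires the webui runtime (shared.opts, devices).
img_rng = ImageRNG((4, 64, 64), seeds=[100, 101], subseeds=[200, 201], subseed_strength=0.5)
x0 = img_rng.next()  # first call: seeded initial noise, shape (2, 4, 64, 64)
x1 = img_rng.next()  # later calls: extra sampler noise from the per-seed generators
```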
+devices.randn = randn
+devices.randn_local = randn_local
+devices.randn_like = randn_like
+devices.randn_without_seed = randn_without_seed
+devices.manual_seed = manual_seed
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 609fd56c..46652fbd 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -29,8 +29,10 @@ ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.Cros ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
# silence new console spam from SD2
-ldm.modules.attention.print = lambda *args: None
-ldm.modules.diffusionmodules.model.print = lambda *args: None
+ldm.modules.attention.print = shared.ldm_print
+ldm.modules.diffusionmodules.model.print = shared.ldm_print
+ldm.util.print = shared.ldm_print
+ldm.models.diffusion.ddpm.print = shared.ldm_print
optimizers = []
current_optimizer: sd_hijack_optimizations.SdOptimization = None
diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py deleted file mode 100644 index c1977b19..00000000 --- a/modules/sd_hijack_inpainting.py +++ /dev/null @@ -1,97 +0,0 @@ -import torch
-
-import ldm.models.diffusion.ddpm
-import ldm.models.diffusion.ddim
-import ldm.models.diffusion.plms
-
-from ldm.models.diffusion.ddim import noise_like
-from ldm.models.diffusion.sampling_util import norm_thresholding
-
-
-@torch.no_grad()
-def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
- unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, dynamic_threshold=None):
- b, *_, device = *x.shape, x.device
-
- def get_model_output(x, t):
- if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
- e_t = self.model.apply_model(x, t, c)
- else:
- x_in = torch.cat([x] * 2)
- t_in = torch.cat([t] * 2)
-
- if isinstance(c, dict):
- assert isinstance(unconditional_conditioning, dict)
- c_in = {}
- for k in c:
- if isinstance(c[k], list):
- c_in[k] = [
- torch.cat([unconditional_conditioning[k][i], c[k][i]])
- for i in range(len(c[k]))
- ]
- else:
- c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
- else:
- c_in = torch.cat([unconditional_conditioning, c])
-
- e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
- e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-
- if score_corrector is not None:
- assert self.model.parameterization == "eps"
- e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
- return e_t
-
- alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
- alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
- sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
- sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
-
- def get_x_prev_and_pred_x0(e_t, index):
- # select parameters corresponding to the currently considered timestep
- a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
- a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
- sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
- sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
-
- # current prediction for x_0
- pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
- if quantize_denoised:
- pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
- if dynamic_threshold is not None:
- pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
- # direction pointing to x_t
- dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
- noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
- if noise_dropout > 0.:
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
- x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
- return x_prev, pred_x0
-
- e_t = get_model_output(x, t)
- if len(old_eps) == 0:
- # Pseudo Improved Euler (2nd order)
- x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
- e_t_next = get_model_output(x_prev, t_next)
- e_t_prime = (e_t + e_t_next) / 2
- elif len(old_eps) == 1:
- # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (3 * e_t - old_eps[-1]) / 2
- elif len(old_eps) == 2:
- # 3nd order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
- elif len(old_eps) >= 3:
- # 4nd order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
-
- x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
-
- return x_prev, pred_x0, e_t
-
-
-def do_inpainting_hijack():
- # p_sample_plms is needed because PLMS can't work with dicts as conditionings
-
- ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms
diff --git a/modules/sd_models.py b/modules/sd_models.py index ba15b451..7a866a07 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -14,8 +14,7 @@ import ldm.modules.midas as midas from ldm.util import instantiate_from_config
-from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache
-from modules.sd_hijack_inpainting import do_inpainting_hijack
+from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack
from modules.timer import Timer
import tomesd
@@ -67,8 +66,11 @@ class CheckpointInfo: self.shorthash = self.sha256[0:10] if self.sha256 else None
self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]'
+ self.short_title = self.name_for_extra if self.shorthash is None else f'{self.name_for_extra} [{self.shorthash}]'
- self.ids = [self.hash, self.model_name, self.title, name, self.name_for_extra, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else [])
+ self.ids = [self.hash, self.model_name, self.title, name, self.name_for_extra, f'{name} [{self.hash}]']
+ if self.shorthash:
+ self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]']
def register(self):
checkpoints_list[self.title] = self
@@ -80,13 +82,18 @@ class CheckpointInfo: if self.sha256 is None:
return
- self.shorthash = self.sha256[0:10]
+ shorthash = self.sha256[0:10]
+ if self.shorthash == self.sha256[0:10]:
+ return self.shorthash
+
+ self.shorthash = shorthash
if self.shorthash not in self.ids:
- self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]']
+ self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]']
checkpoints_list.pop(self.title, None)
self.title = f'{self.name} [{self.shorthash}]'
+ self.short_title = f'{self.name_for_extra} [{self.shorthash}]'
self.register()
return self.shorthash
@@ -107,14 +114,8 @@ def setup_model(): enable_midas_autodownload()
-def checkpoint_tiles():
- def convert(name):
- return int(name) if name.isdigit() else name.lower()
-
- def alphanumeric_key(key):
- return [convert(c) for c in re.split('([0-9]+)', key)]
-
- return sorted([x.title for x in checkpoints_list.values()], key=alphanumeric_key)
+def checkpoint_tiles(use_short=False):
+ return [x.short_title if use_short else x.title for x in checkpoints_list.values()]
def list_models():
@@ -137,11 +138,14 @@ def list_models(): elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
- for filename in sorted(model_list, key=str.lower):
+ for filename in model_list:
checkpoint_info = CheckpointInfo(filename)
checkpoint_info.register()
+re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$")
+
+
def get_closet_checkpoint_match(search_string):
checkpoint_info = checkpoint_aliases.get(search_string, None)
if checkpoint_info is not None:
@@ -151,6 +155,11 @@ def get_closet_checkpoint_match(search_string): if found:
return found[0]
+ search_string_without_checksum = re.sub(re_strip_checksum, '', search_string)
+ found = sorted([info for info in checkpoints_list.values() if search_string_without_checksum in info.title], key=lambda x: len(x.title))
+ if found:
+ return found[0]
+
return None
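The new fallback strips a trailing `[hash]` suffix so a title stored with one checksum can still match after re-hashing; the regex behaves like this (checkpoint name illustrative):
```python
import re

re_strip_checksum = re.compile(r"\s*\[[^]]+]\s*$")
print(re.sub(re_strip_checksum, '', "model_v1.safetensors [31e35c80fc]"))
# -> 'model_v1.safetensors'
```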
@@ -353,7 +362,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer sd_vae.delete_base_vae()
sd_vae.clear_loaded_vae()
- vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename)
+ vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename).tuple()
sd_vae.load_vae(model, vae_file, vae_source)
timer.record("load VAE")
@@ -430,6 +439,7 @@ sdxl_refiner_clip_weight = 'conditioner.embedders.0.model.ln_final.weight' class SdModelData:
def __init__(self):
self.sd_model = None
+ self.loaded_sd_models = []
self.was_loaded_at_least_once = False
self.lock = threading.Lock()
@@ -444,6 +454,7 @@ class SdModelData: try:
load_model()
+
except Exception as e:
errors.display(e, "loading stable diffusion model", full_traceback=True)
print("", file=sys.stderr)
@@ -455,11 +466,23 @@ class SdModelData: def set_sd_model(self, v):
self.sd_model = v
+ try:
+ self.loaded_sd_models.remove(v)
+ except ValueError:
+ pass
+
+ if v is not None:
+ self.loaded_sd_models.insert(0, v)
+
model_data = SdModelData()
def get_empty_cond(sd_model):
+
+ p = processing.StableDiffusionProcessingTxt2Img()
+ extra_networks.activate(p, {})
+
if hasattr(sd_model, 'conditioner'):
d = sd_model.get_learned_conditioning([""])
return d['crossattn']
@@ -467,19 +490,39 @@ def get_empty_cond(sd_model): return sd_model.cond_stage_model([""])
+def send_model_to_cpu(m):
+ if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+ lowvram.send_everything_to_cpu()
+ else:
+ m.to(devices.cpu)
+
+ devices.torch_gc()
+
+
+def send_model_to_device(m):
+ if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+ lowvram.setup_for_low_vram(m, shared.cmd_opts.medvram)
+ else:
+ m.to(shared.device)
+
+
+def send_model_to_trash(m):
+ m.to(device="meta")
+ devices.torch_gc()
+
+
def load_model(checkpoint_info=None, already_loaded_state_dict=None):
- from modules import lowvram, sd_hijack
+ from modules import sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
+ timer = Timer()
+
if model_data.sd_model:
- sd_hijack.model_hijack.undo_hijack(model_data.sd_model)
+ send_model_to_trash(model_data.sd_model)
model_data.sd_model = None
- gc.collect()
devices.torch_gc()
- do_inpainting_hijack()
-
- timer = Timer()
+ timer.record("unload existing model")
if already_loaded_state_dict is not None:
state_dict = already_loaded_state_dict
@@ -519,12 +562,9 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None): with sd_disable_initialization.LoadStateDictOnMeta(state_dict, devices.cpu):
load_model_weights(sd_model, checkpoint_info, state_dict, timer)
+ timer.record("load weights from state dict")
- if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
- lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
- else:
- sd_model.to(shared.device)
-
+ send_model_to_device(sd_model)
timer.record("move model to device")
sd_hijack.model_hijack.hijack(sd_model)
@@ -532,7 +572,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None): timer.record("hijack")
sd_model.eval()
- model_data.sd_model = sd_model
+ model_data.set_sd_model(sd_model)
model_data.was_loaded_at_least_once = True
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model
@@ -553,10 +593,62 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None): return sd_model
+def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
+ """
+ Checks whether the desired checkpoint from checkpoint_info is already loaded in model_data.loaded_sd_models.
+ If it is loaded, returns it (moving it to GPU if necessary, and moving the currently loaded model to CPU if necessary).
+ If not, returns the model that can be used to load weights from checkpoint_info's file.
+ If no such model exists, returns None.
+ Additionally deletes loaded models that are over the limit set in settings (sd_checkpoints_limit).
+ """
+
+ already_loaded = None
+ for i in reversed(range(len(model_data.loaded_sd_models))):
+ loaded_model = model_data.loaded_sd_models[i]
+ if loaded_model.sd_checkpoint_info.filename == checkpoint_info.filename:
+ already_loaded = loaded_model
+ continue
+
+ if len(model_data.loaded_sd_models) > shared.opts.sd_checkpoints_limit > 0:
+ print(f"Unloading model {len(model_data.loaded_sd_models)} over the limit of {shared.opts.sd_checkpoints_limit}: {loaded_model.sd_checkpoint_info.title}")
+ model_data.loaded_sd_models.pop()
+ send_model_to_trash(loaded_model)
+ timer.record("send model to trash")
+
+ if shared.opts.sd_checkpoints_keep_in_cpu:
+ send_model_to_cpu(sd_model)
+ timer.record("send model to cpu")
+
+ if already_loaded is not None:
+ send_model_to_device(already_loaded)
+ timer.record("send model to device")
+
+ model_data.set_sd_model(already_loaded)
+ shared.opts.data["sd_model_checkpoint"] = already_loaded.sd_checkpoint_info.title
+ shared.opts.data["sd_checkpoint_hash"] = already_loaded.sd_checkpoint_info.sha256
+ print(f"Using already loaded model {already_loaded.sd_checkpoint_info.title}: done in {timer.summary()}")
+ return model_data.sd_model
+ elif shared.opts.sd_checkpoints_limit > 1 and len(model_data.loaded_sd_models) < shared.opts.sd_checkpoints_limit:
+ print(f"Loading model {checkpoint_info.title} ({len(model_data.loaded_sd_models) + 1} out of {shared.opts.sd_checkpoints_limit})")
+
+ model_data.sd_model = None
+ load_model(checkpoint_info)
+ return model_data.sd_model
+ elif len(model_data.loaded_sd_models) > 0:
+ sd_model = model_data.loaded_sd_models.pop()
+ model_data.sd_model = sd_model
+
+ print(f"Reusing loaded model {sd_model.sd_checkpoint_info.title} to load {checkpoint_info.title}")
+ return sd_model
+ else:
+ return None
+
+
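`loaded_sd_models` behaves as a small most-recently-used cache: `set_sd_model` inserts at the front, so the tail holds the least recently used model and is what gets evicted once `sd_checkpoints_limit` is exceeded. A toy sketch of the eviction rule, with `loaded.remove` standing in for `send_model_to_trash`:
```python
# Toy eviction rule: walk from the oldest end, keep the requested
# checkpoint, drop models while the list is over the limit.
def evict_over_limit(loaded, wanted, limit):
    already_loaded = None
    for model in reversed(list(loaded)):
        if model == wanted:
            already_loaded = model
            continue
        if limit > 0 and len(loaded) > limit:
            loaded.remove(model)
    return already_loaded

loaded = ["newest", "older", "oldest"]  # front = most recently used
assert evict_over_limit(loaded, "newest", limit=2) == "newest"
assert loaded == ["newest", "older"]  # "oldest" was evicted
```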
def reload_model_weights(sd_model=None, info=None):
- from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
+ timer = Timer()
+
if not sd_model:
sd_model = model_data.sd_model
@@ -565,19 +657,17 @@ def reload_model_weights(sd_model=None, info=None): else:
current_checkpoint_info = sd_model.sd_checkpoint_info
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
- return
+ return sd_model
- sd_unet.apply_unet("None")
-
- if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
- lowvram.send_everything_to_cpu()
- else:
- sd_model.to(devices.cpu)
+ sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer)
+ if sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename:
+ return sd_model
+ if sd_model is not None:
+ sd_unet.apply_unet("None")
+ send_model_to_cpu(sd_model)
sd_hijack.model_hijack.undo_hijack(sd_model)
- timer = Timer()
-
state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
@@ -585,7 +675,9 @@ def reload_model_weights(sd_model=None, info=None): timer.record("find config")
if sd_model is None or checkpoint_config != sd_model.used_config:
- del sd_model
+ if sd_model is not None:
+ send_model_to_trash(sd_model)
+
load_model(checkpoint_info, already_loaded_state_dict=state_dict)
return model_data.sd_model
@@ -608,11 +700,13 @@ def reload_model_weights(sd_model=None, info=None): print(f"Weights loaded in {timer.summary()}.")
+ model_data.set_sd_model(sd_model)
+ sd_unet.apply_unet()
+
return sd_model
def unload_model_weights(sd_model=None, info=None):
- from modules import devices, sd_hijack
timer = Timer()
if model_data.sd_model:
diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 8266fa39..08dd03f1 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -2,7 +2,7 @@ import os import torch
-from modules import shared, paths, sd_disable_initialization
+from modules import shared, paths, sd_disable_initialization, devices
sd_configs_path = shared.sd_configs_path
sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")
@@ -29,7 +29,6 @@ def is_using_v_parameterization_for_sd2(state_dict): """
import ldm.modules.diffusionmodules.openaimodel
- from modules import devices
device = devices.cpu
diff --git a/modules/sd_models_xl.py b/modules/sd_models_xl.py index bc219508..01123321 100644 --- a/modules/sd_models_xl.py +++ b/modules/sd_models_xl.py @@ -98,10 +98,10 @@ def extend_sdxl(model): model.conditioner.wrapped = torch.nn.Module()
-sgm.modules.attention.print = lambda *args: None
-sgm.modules.diffusionmodules.model.print = lambda *args: None
-sgm.modules.diffusionmodules.openaimodel.print = lambda *args: None
-sgm.modules.encoders.modules.print = lambda *args: None
+sgm.modules.attention.print = shared.ldm_print
+sgm.modules.diffusionmodules.model.print = shared.ldm_print
+sgm.modules.diffusionmodules.openaimodel.print = shared.ldm_print
+sgm.modules.encoders.modules.print = shared.ldm_print
# this gets the code to load the vanilla attention that we override
sgm.modules.attention.SDP_IS_AVAILABLE = True
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index bea2684c..45faae62 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -1,17 +1,18 @@ -from modules import sd_samplers_compvis, sd_samplers_kdiffusion, shared
+from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, shared
# imports for functions that previously were here and are used by other modules
from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401
all_samplers = [
*sd_samplers_kdiffusion.samplers_data_k_diffusion,
- *sd_samplers_compvis.samplers_data_compvis,
+ *sd_samplers_timesteps.samplers_data_timesteps,
]
all_samplers_map = {x.name: x for x in all_samplers}
samplers = []
samplers_for_img2img = []
samplers_map = {}
+samplers_hidden = {}
def find_sampler_config(name):
@@ -38,13 +39,11 @@ def create_sampler(name, model): def set_samplers():
- global samplers, samplers_for_img2img
+ global samplers, samplers_for_img2img, samplers_hidden
- hidden = set(shared.opts.hide_samplers)
- hidden_img2img = set(shared.opts.hide_samplers + ['PLMS', 'UniPC'])
-
- samplers = [x for x in all_samplers if x.name not in hidden]
- samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
+ samplers_hidden = set(shared.opts.hide_samplers)
+ samplers = all_samplers
+ samplers_for_img2img = all_samplers
samplers_map.clear()
for sampler in all_samplers:
@@ -53,4 +52,8 @@ def set_samplers(): samplers_map[alias.lower()] = sampler.name
+def visible_sampler_names():
+ return [x.name for x in samplers if x.name not in samplers_hidden]
+
+
set_samplers()
diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py new file mode 100644 index 00000000..d826222c --- /dev/null +++ b/modules/sd_samplers_cfg_denoiser.py @@ -0,0 +1,203 @@ +import torch
+from modules import prompt_parser, devices, sd_samplers_common
+
+from modules.shared import opts, state
+import modules.shared as shared
+from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
+from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
+from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
+
+
+def catenate_conds(conds):
+ if not isinstance(conds[0], dict):
+ return torch.cat(conds)
+
+ return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()}
+
+
+def subscript_cond(cond, a, b):
+ if not isinstance(cond, dict):
+ return cond[a:b]
+
+ return {key: vec[a:b] for key, vec in cond.items()}
+
+
+def pad_cond(tensor, repeats, empty):
+ if not isinstance(tensor, dict):
+ return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1)
+
+ tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty)
+ return tensor
+
+
+class CFGDenoiser(torch.nn.Module):
+ """
+ Classifier-free guidance denoiser. A wrapper for the stable diffusion model (specifically for the unet)
+ that can take a noisy picture and produce a noise-free picture using two guidances (prompts)
+ instead of one. Originally, the second prompt was just an empty string, but we use a non-empty
+ negative prompt.
+ """
+
+ def __init__(self, model, sampler):
+ super().__init__()
+ self.inner_model = model
+ self.mask = None
+ self.nmask = None
+ self.init_latent = None
+ self.step = 0
+ self.image_cfg_scale = None
+ self.padded_cond_uncond = False
+ self.sampler = sampler
+
+ def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
+ denoised_uncond = x_out[-uncond.shape[0]:]
+ denoised = torch.clone(denoised_uncond)
+
+ for i, conds in enumerate(conds_list):
+ for cond_index, weight in conds:
+ denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
+
+ return denoised
+
+ def combine_denoised_for_edit_model(self, x_out, cond_scale):
+ out_cond, out_img_cond, out_uncond = x_out.chunk(3)
+ denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond)
+
+ return denoised
+
+ def get_pred_x0(self, x_in, x_out, sigma):
+ return x_out
+
+ def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond):
+ if state.interrupted or state.skipped:
+ raise sd_samplers_common.InterruptedException
+
+ # when self.image_cfg_scale == 1.0, the edit model produces the same results as normal sampling,
+ # so is_edit_model is set to False to support AND composition.
+ is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0
+
+ conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
+ uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
+
+ assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
+
+ if self.mask is not None:
+ x = self.init_latent * self.mask + self.nmask * x
+
+ batch_size = len(conds_list)
+ repeats = [len(conds_list[i]) for i in range(batch_size)]
+
+ if shared.sd_model.model.conditioning_key == "crossattn-adm":
+ image_uncond = torch.zeros_like(image_cond)
+ make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm}
+ else:
+ image_uncond = image_cond
+ if isinstance(uncond, dict):
+ make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]}
+ else:
+ make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]}
+
+ if not is_edit_model:
+ x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
+ sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
+ image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond])
+ else:
+ x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x])
+ sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma])
+ image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)])
+
+ denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond)
+ cfg_denoiser_callback(denoiser_params)
+ x_in = denoiser_params.x
+ image_cond_in = denoiser_params.image_cond
+ sigma_in = denoiser_params.sigma
+ tensor = denoiser_params.text_cond
+ uncond = denoiser_params.text_uncond
+ skip_uncond = False
+
+ # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
+ if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model:
+ skip_uncond = True
+ x_in = x_in[:-batch_size]
+ sigma_in = sigma_in[:-batch_size]
+
+ self.padded_cond_uncond = False
+ if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]:
+ empty = shared.sd_model.cond_stage_model_empty_prompt
+ num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]
+
+ if num_repeats < 0:
+ tensor = pad_cond(tensor, -num_repeats, empty)
+ self.padded_cond_uncond = True
+ elif num_repeats > 0:
+ uncond = pad_cond(uncond, num_repeats, empty)
+ self.padded_cond_uncond = True
+
+ if tensor.shape[1] == uncond.shape[1] or skip_uncond:
+ if is_edit_model:
+ cond_in = catenate_conds([tensor, uncond, uncond])
+ elif skip_uncond:
+ cond_in = tensor
+ else:
+ cond_in = catenate_conds([tensor, uncond])
+
+ if shared.batch_cond_uncond:
+ x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
+ else:
+ x_out = torch.zeros_like(x_in)
+ for batch_offset in range(0, x_out.shape[0], batch_size):
+ a = batch_offset
+ b = a + batch_size
+ x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b]))
+ else:
+ x_out = torch.zeros_like(x_in)
+ batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
+ for batch_offset in range(0, tensor.shape[0], batch_size):
+ a = batch_offset
+ b = min(a + batch_size, tensor.shape[0])
+
+ if not is_edit_model:
+ c_crossattn = subscript_cond(tensor, a, b)
+ else:
+ c_crossattn = torch.cat([tensor[a:b], uncond])  # uncond belongs inside the list; passing it as the second argument would be read as the dim
+
+ x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
+
+ if not skip_uncond:
+ x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:]))
+
+ denoised_image_indexes = [x[0][0] for x in conds_list]
+ if skip_uncond:
+ fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
+ x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
+
+ denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
+ cfg_denoised_callback(denoised_params)
+
+ devices.test_for_nans(x_out, "unet")
+
+ if is_edit_model:
+ denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
+ elif skip_uncond:
+ denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
+ else:
+ denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
+
+ self.sampler.last_latent = self.get_pred_x0(torch.cat([x_in[i:i + 1] for i in denoised_image_indexes]), torch.cat([x_out[i:i + 1] for i in denoised_image_indexes]), sigma)
+
+ if opts.live_preview_content == "Prompt":
+ preview = self.sampler.last_latent
+ elif opts.live_preview_content == "Negative prompt":
+ preview = self.get_pred_x0(x_in[-uncond.shape[0]:], x_out[-uncond.shape[0]:], sigma)
+ else:
+ preview = self.get_pred_x0(torch.cat([x_in[i:i+1] for i in denoised_image_indexes]), torch.cat([denoised[i:i+1] for i in denoised_image_indexes]), sigma)
+
+ sd_samplers_common.store_latent(preview)
+
+ after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
+ cfg_after_cfg_callback(after_cfg_callback_params)
+ denoised = after_cfg_callback_params.x
+
+ self.step += 1
+ return denoised
+
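Reviewer note: for reference, the guidance math in `combine_denoised` above starts from the uncond prediction and adds `weight * cond_scale * (cond_i - uncond)` for each AND-composed prompt. A minimal NumPy sketch with made-up numbers:
```python
# One image, two AND-composed prompts (weights 1.0 and 0.5), cond_scale 7.
import numpy as np

uncond = np.array([0.0, 0.0])
cond_a = np.array([1.0, 2.0])
cond_b = np.array([2.0, 0.0])
cond_scale = 7.0

denoised = uncond.copy()
for cond, weight in [(cond_a, 1.0), (cond_b, 0.5)]:
    denoised += (cond - uncond) * (weight * cond_scale)

print(denoised)  # [14. 14.] -> uncond + 7*(cond_a - uncond) + 3.5*(cond_b - uncond)
```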
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index b3d344e7..97bc0804 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -1,9 +1,11 @@
+import inspect
from collections import namedtuple
import numpy as np
import torch
from PIL import Image
from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared
from modules.shared import opts, state
+import k_diffusion.sampling
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
@@ -23,19 +25,29 @@ def setup_img2img_steps(p, steps=None):
approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3}
-def single_sample_to_image(sample, approximation=None):
+def samples_to_images_tensor(sample, approximation=None, model=None):
+ '''latents -> images [-1, 1]'''
if approximation is None:
approximation = approximation_indexes.get(opts.show_progress_type, 0)
if approximation == 2:
- x_sample = sd_vae_approx.cheap_approximation(sample) * 0.5 + 0.5
+ x_sample = sd_vae_approx.cheap_approximation(sample)
elif approximation == 1:
- x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() * 0.5 + 0.5
+ x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype)).detach()
elif approximation == 3:
x_sample = sample * 1.5
- x_sample = sd_vae_taesd.model()(x_sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
+ x_sample = sd_vae_taesd.decoder_model()(x_sample.to(devices.device, devices.dtype)).detach()
+ x_sample = x_sample * 2 - 1
else:
- x_sample = decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] * 0.5 + 0.5
+ if model is None:
+ model = shared.sd_model
+ x_sample = model.decode_first_stage(sample.to(model.first_stage_model.dtype))
+
+ return x_sample
+
+
+def single_sample_to_image(sample, approximation=None):
+ x_sample = samples_to_images_tensor(sample.unsqueeze(0), approximation)[0] * 0.5 + 0.5
x_sample = torch.clamp(x_sample, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
@@ -45,9 +57,9 @@ def single_sample_to_image(sample, approximation=None):
def decode_first_stage(model, x):
- x = model.decode_first_stage(x.to(devices.dtype_vae))
-
- return x
+ x = x.to(devices.dtype_vae)
+ approx_index = approximation_indexes.get(opts.sd_vae_decode_method, 0)
+ return samples_to_images_tensor(x, approx_index, model)
def sample_to_image(samples, index=0, approximation=None):
@@ -58,6 +70,24 @@ def samples_to_image_grid(samples, approximation=None):
return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
+def images_tensor_to_samples(image, approximation=None, model=None):
+ '''image[0, 1] -> latent'''
+ if approximation is None:
+ approximation = approximation_indexes.get(opts.sd_vae_encode_method, 0)
+
+ if approximation == 3:
+ image = image.to(devices.device, devices.dtype)
+ x_latent = sd_vae_taesd.encoder_model()(image)
+ else:
+ if model is None:
+ model = shared.sd_model
+ image = image.to(shared.device, dtype=devices.dtype_vae)
+ image = image * 2 - 1
+ x_latent = model.get_first_stage_encoding(model.encode_first_stage(image))
+
+ return x_latent
+
+
def store_latent(decoded):
state.current_latent = decoded
@@ -99,3 +129,139 @@ def replace_torchsde_browinan():
replace_torchsde_browinan()
+
+
+class TorchHijack:
+ """This is here to replace torch.randn_like of k-diffusion.
+
+ k-diffusion has a random_sampler argument for most samplers, but not all, so
+ this is needed to properly replace every use of torch.randn_like.
+
+ We need the replacement so that images generated in batches match images generated individually."""
+
+ def __init__(self, p):
+ self.rng = p.rng
+
+ def __getattr__(self, item):
+ if item == 'randn_like':
+ return self.randn_like
+
+ if hasattr(torch, item):
+ return getattr(torch, item)
+
+ raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
+
+ def randn_like(self, x):
+ return self.rng.next()
+
+
+class Sampler:
+ def __init__(self, funcname):
+ self.funcname = funcname
+ self.func = funcname
+ self.extra_params = []
+ self.sampler_noises = None
+ self.stop_at = None
+ self.eta = None
+ self.config = None # set by the function calling the constructor
+ self.last_latent = None
+ self.s_min_uncond = None
+ self.s_churn = 0.0
+ self.s_tmin = 0.0
+ self.s_tmax = float('inf')
+ self.s_noise = 1.0
+
+ self.eta_option_field = 'eta_ancestral'
+ self.eta_infotext_field = 'Eta'
+
+ self.conditioning_key = shared.sd_model.model.conditioning_key
+
+ self.model_wrap = None
+ self.model_wrap_cfg = None
+
+ def callback_state(self, d):
+ step = d['i']
+
+ if self.stop_at is not None and step > self.stop_at:
+ raise InterruptedException
+
+ state.sampling_step = step
+ shared.total_tqdm.update()
+
+ def launch_sampling(self, steps, func):
+ state.sampling_steps = steps
+ state.sampling_step = 0
+
+ try:
+ return func()
+ except RecursionError:
+ print(
+ 'Encountered RecursionError during sampling, returning last latent. '
+ 'rho >5 with a polyexponential scheduler may cause this error. '
+ 'You should try to use a smaller rho value instead.'
+ )
+ return self.last_latent
+ except InterruptedException:
+ return self.last_latent
+
+ def number_of_needed_noises(self, p):
+ return p.steps
+
+ def initialize(self, p) -> dict:
+ self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
+ self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
+ self.model_wrap_cfg.step = 0
+ self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
+ self.eta = p.eta if p.eta is not None else getattr(opts, self.eta_option_field, 0.0)
+ self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)
+
+ k_diffusion.sampling.torch = TorchHijack(p)
+
+ extra_params_kwargs = {}
+ for param_name in self.extra_params:
+ if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
+ extra_params_kwargs[param_name] = getattr(p, param_name)
+
+ if 'eta' in inspect.signature(self.func).parameters:
+ if self.eta != 1.0:
+ p.extra_generation_params[self.eta_infotext_field] = self.eta
+
+ extra_params_kwargs['eta'] = self.eta
+
+ if len(self.extra_params) > 0:
+ s_churn = getattr(opts, 's_churn', p.s_churn)
+ s_tmin = getattr(opts, 's_tmin', p.s_tmin)
+ s_tmax = getattr(opts, 's_tmax', p.s_tmax) or self.s_tmax # 0 = inf
+ s_noise = getattr(opts, 's_noise', p.s_noise)
+
+ if s_churn != self.s_churn:
+ extra_params_kwargs['s_churn'] = s_churn
+ p.s_churn = s_churn
+ p.extra_generation_params['Sigma churn'] = s_churn
+ if s_tmin != self.s_tmin:
+ extra_params_kwargs['s_tmin'] = s_tmin
+ p.s_tmin = s_tmin
+ p.extra_generation_params['Sigma tmin'] = s_tmin
+ if s_tmax != self.s_tmax:
+ extra_params_kwargs['s_tmax'] = s_tmax
+ p.s_tmax = s_tmax
+ p.extra_generation_params['Sigma tmax'] = s_tmax
+ if s_noise != self.s_noise:
+ extra_params_kwargs['s_noise'] = s_noise
+ p.s_noise = s_noise
+ p.extra_generation_params['Sigma noise'] = s_noise
+
+ return extra_params_kwargs
+
+ def create_noise_sampler(self, x, sigmas, p):
+ """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
+ if shared.opts.no_dpmpp_sde_batch_determinism:
+ return None
+
+ from k_diffusion.sampling import BrownianTreeNoiseSampler
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
+ current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]
+ return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds)
+
+
+
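Reviewer note: the helpers above commit to explicit value-range conventions: `samples_to_images_tensor` returns images in [-1, 1], `single_sample_to_image` rescales to [0, 1] with `x * 0.5 + 0.5`, and `images_tensor_to_samples` expects [0, 1] input and maps back with `x * 2 - 1` before encoding. A quick round-trip check of those scalings:
```python
import torch

img_01 = torch.rand(1, 3, 8, 8)        # image tensor in [0, 1]
img_pm1 = img_01 * 2 - 1               # -> [-1, 1], as fed to the VAE encoder
back_01 = img_pm1 * 0.5 + 0.5          # -> [0, 1], as done for previews

assert torch.allclose(img_01, back_01, atol=1e-6)
```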
diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py
deleted file mode 100644
index 4a8396f9..00000000
--- a/modules/sd_samplers_compvis.py
+++ /dev/null
@@ -1,224 +0,0 @@
-import math
-import ldm.models.diffusion.ddim
-import ldm.models.diffusion.plms
-
-import numpy as np
-import torch
-
-from modules.shared import state
-from modules import sd_samplers_common, prompt_parser, shared
-import modules.models.diffusion.uni_pc
-
-
-samplers_data_compvis = [
- sd_samplers_common.SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {"default_eta_is_0": True, "uses_ensd": True, "no_sdxl": True}),
- sd_samplers_common.SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {"no_sdxl": True}),
- sd_samplers_common.SamplerData('UniPC', lambda model: VanillaStableDiffusionSampler(modules.models.diffusion.uni_pc.UniPCSampler, model), [], {"no_sdxl": True}),
-]
-
-
-class VanillaStableDiffusionSampler:
- def __init__(self, constructor, sd_model):
- self.sampler = constructor(sd_model)
- self.is_ddim = hasattr(self.sampler, 'p_sample_ddim')
- self.is_plms = hasattr(self.sampler, 'p_sample_plms')
- self.is_unipc = isinstance(self.sampler, modules.models.diffusion.uni_pc.UniPCSampler)
- self.orig_p_sample_ddim = None
- if self.is_plms:
- self.orig_p_sample_ddim = self.sampler.p_sample_plms
- elif self.is_ddim:
- self.orig_p_sample_ddim = self.sampler.p_sample_ddim
- self.mask = None
- self.nmask = None
- self.init_latent = None
- self.sampler_noises = None
- self.step = 0
- self.stop_at = None
- self.eta = None
- self.config = None
- self.last_latent = None
-
- self.conditioning_key = sd_model.model.conditioning_key
-
- def number_of_needed_noises(self, p):
- return 0
-
- def launch_sampling(self, steps, func):
- state.sampling_steps = steps
- state.sampling_step = 0
-
- try:
- return func()
- except sd_samplers_common.InterruptedException:
- return self.last_latent
-
- def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
- x_dec, ts, cond, unconditional_conditioning = self.before_sample(x_dec, ts, cond, unconditional_conditioning)
-
- res = self.orig_p_sample_ddim(x_dec, cond, ts, *args, unconditional_conditioning=unconditional_conditioning, **kwargs)
-
- x_dec, ts, cond, unconditional_conditioning, res = self.after_sample(x_dec, ts, cond, unconditional_conditioning, res)
-
- return res
-
- def before_sample(self, x, ts, cond, unconditional_conditioning):
- if state.interrupted or state.skipped:
- raise sd_samplers_common.InterruptedException
-
- if self.stop_at is not None and self.step > self.stop_at:
- raise sd_samplers_common.InterruptedException
-
- # Have to unwrap the inpainting conditioning here to perform pre-processing
- image_conditioning = None
- uc_image_conditioning = None
- if isinstance(cond, dict):
- if self.conditioning_key == "crossattn-adm":
- image_conditioning = cond["c_adm"]
- uc_image_conditioning = unconditional_conditioning["c_adm"]
- else:
- image_conditioning = cond["c_concat"][0]
- cond = cond["c_crossattn"][0]
- unconditional_conditioning = unconditional_conditioning["c_crossattn"][0]
-
- conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
- unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
-
- assert all(len(conds) == 1 for conds in conds_list), 'composition via AND is not supported for DDIM/PLMS samplers'
- cond = tensor
-
- # for DDIM, shapes must match, we can't just process cond and uncond independently;
- # filling unconditional_conditioning with repeats of the last vector to match length is
- # not 100% correct but should work well enough
- if unconditional_conditioning.shape[1] < cond.shape[1]:
- last_vector = unconditional_conditioning[:, -1:]
- last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
- unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
- elif unconditional_conditioning.shape[1] > cond.shape[1]:
- unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
-
- if self.mask is not None:
- img_orig = self.sampler.model.q_sample(self.init_latent, ts)
- x = img_orig * self.mask + self.nmask * x
-
- # Wrap the image conditioning back up since the DDIM code can accept the dict directly.
- # Note that they need to be lists because it just concatenates them later.
- if image_conditioning is not None:
- if self.conditioning_key == "crossattn-adm":
- cond = {"c_adm": image_conditioning, "c_crossattn": [cond]}
- unconditional_conditioning = {"c_adm": uc_image_conditioning, "c_crossattn": [unconditional_conditioning]}
- else:
- cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
- unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
-
- return x, ts, cond, unconditional_conditioning
-
- def update_step(self, last_latent):
- if self.mask is not None:
- self.last_latent = self.init_latent * self.mask + self.nmask * last_latent
- else:
- self.last_latent = last_latent
-
- sd_samplers_common.store_latent(self.last_latent)
-
- self.step += 1
- state.sampling_step = self.step
- shared.total_tqdm.update()
-
- def after_sample(self, x, ts, cond, uncond, res):
- if not self.is_unipc:
- self.update_step(res[1])
-
- return x, ts, cond, uncond, res
-
- def unipc_after_update(self, x, model_x):
- self.update_step(x)
-
- def initialize(self, p):
- if self.is_ddim:
- self.eta = p.eta if p.eta is not None else shared.opts.eta_ddim
- else:
- self.eta = 0.0
-
- if self.eta != 0.0:
- p.extra_generation_params["Eta DDIM"] = self.eta
-
- if self.is_unipc:
- keys = [
- ('UniPC variant', 'uni_pc_variant'),
- ('UniPC skip type', 'uni_pc_skip_type'),
- ('UniPC order', 'uni_pc_order'),
- ('UniPC lower order final', 'uni_pc_lower_order_final'),
- ]
-
- for name, key in keys:
- v = getattr(shared.opts, key)
- if v != shared.opts.get_default(key):
- p.extra_generation_params[name] = v
-
- for fieldname in ['p_sample_ddim', 'p_sample_plms']:
- if hasattr(self.sampler, fieldname):
- setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
- if self.is_unipc:
- self.sampler.set_hooks(lambda x, t, c, u: self.before_sample(x, t, c, u), lambda x, t, c, u, r: self.after_sample(x, t, c, u, r), lambda x, mx: self.unipc_after_update(x, mx))
-
- self.mask = p.mask if hasattr(p, 'mask') else None
- self.nmask = p.nmask if hasattr(p, 'nmask') else None
-
-
- def adjust_steps_if_invalid(self, p, num_steps):
- if ((self.config.name == 'DDIM') and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS') or (self.config.name == 'UniPC'):
- if self.config.name == 'UniPC' and num_steps < shared.opts.uni_pc_order:
- num_steps = shared.opts.uni_pc_order
- valid_step = 999 / (1000 // num_steps)
- if valid_step == math.floor(valid_step):
- return int(valid_step) + 1
-
- return num_steps
-
- def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
- steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
- steps = self.adjust_steps_if_invalid(p, steps)
- self.initialize(p)
-
- self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
- x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
-
- self.init_latent = x
- self.last_latent = x
- self.step = 0
-
- # Wrap the conditioning models with additional image conditioning for inpainting model
- if image_conditioning is not None:
- if self.conditioning_key == "crossattn-adm":
- conditioning = {"c_adm": image_conditioning, "c_crossattn": [conditioning]}
- unconditional_conditioning = {"c_adm": torch.zeros_like(image_conditioning), "c_crossattn": [unconditional_conditioning]}
- else:
- conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
- unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
-
- samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
-
- return samples
-
- def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
- self.initialize(p)
-
- self.init_latent = None
- self.last_latent = x
- self.step = 0
-
- steps = self.adjust_steps_if_invalid(p, steps or p.steps)
-
- # Wrap the conditioning models with additional image conditioning for inpainting model
- # dummy_for_plms is needed because PLMS code checks the first item in the dict to have the right shape
- if image_conditioning is not None:
- if self.conditioning_key == "crossattn-adm":
- conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_adm": image_conditioning}
- unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_adm": torch.zeros_like(image_conditioning)}
- else:
- conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]}
- unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]}
-
- samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
-
- return samples_ddim
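Reviewer note: a worked check of the deleted `adjust_steps_if_invalid` arithmetic above, which the new timesteps-based samplers no longer need. With a uniform schedule the stride is `1000 // num_steps`, and whenever `999 / stride` came out to an exact integer the step count was bumped by one:
```python
import math

def adjust(num_steps):
    # Same arithmetic as the removed adjust_steps_if_invalid.
    valid_step = 999 / (1000 // num_steps)
    if valid_step == math.floor(valid_step):
        return int(valid_step) + 1
    return num_steps

print(adjust(9))   # 10  (stride 111; 999 / 111 = 9.0 exactly, so bump to 10)
print(adjust(20))  # 20  (stride 50; 999 / 50 = 19.98, not an integer)
```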
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 8bb639f5..5613b8c1 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -1,16 +1,17 @@
-from collections import deque
import torch
import inspect
import k_diffusion.sampling
-from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_extra
+from modules import sd_samplers_common, sd_samplers_extra
+from modules.sd_samplers_cfg_denoiser import CFGDenoiser
-from modules.shared import opts, state
+from modules.shared import opts
import modules.shared as shared
-from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
-from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
-from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
samplers_k_diffusion = [
+ ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
+ ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
+ ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}),
+ ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}),
('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}),
('Euler', 'sample_euler', ['k_euler'], {}),
('LMS', 'sample_lms', ['k_lms'], {}),
@@ -27,10 +28,6 @@ samplers_k_diffusion = [
('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}),
('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}),
- ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
- ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
- ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}),
- ('DPM++ 2M SDE Exponential', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_exp'], {'scheduler': 'exponential', "brownian_noise": True}),
('Restart', sd_samplers_extra.restart_sampler, ['restart'], {'scheduler': 'karras'}),
]
@@ -56,286 +53,17 @@ k_diffusion_scheduler = {
}
-def catenate_conds(conds):
- if not isinstance(conds[0], dict):
- return torch.cat(conds)
-
- return {key: torch.cat([x[key] for x in conds]) for key in conds[0].keys()}
-
-
-def subscript_cond(cond, a, b):
- if not isinstance(cond, dict):
- return cond[a:b]
-
- return {key: vec[a:b] for key, vec in cond.items()}
-
-
-def pad_cond(tensor, repeats, empty):
- if not isinstance(tensor, dict):
- return torch.cat([tensor, empty.repeat((tensor.shape[0], repeats, 1))], axis=1)
-
- tensor['crossattn'] = pad_cond(tensor['crossattn'], repeats, empty)
- return tensor
-
-
-class CFGDenoiser(torch.nn.Module):
- """
- Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet)
- that can take a noisy picture and produce a noise-free picture using two guidances (prompts)
- instead of one. Originally, the second prompt is just an empty string, but we use non-empty
- negative prompt.
- """
-
- def __init__(self, model):
- super().__init__()
- self.inner_model = model
- self.mask = None
- self.nmask = None
- self.init_latent = None
- self.step = 0
- self.image_cfg_scale = None
- self.padded_cond_uncond = False
-
- def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
- denoised_uncond = x_out[-uncond.shape[0]:]
- denoised = torch.clone(denoised_uncond)
-
- for i, conds in enumerate(conds_list):
- for cond_index, weight in conds:
- denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
-
- return denoised
-
- def combine_denoised_for_edit_model(self, x_out, cond_scale):
- out_cond, out_img_cond, out_uncond = x_out.chunk(3)
- denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond)
-
- return denoised
-
- def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond):
- if state.interrupted or state.skipped:
- raise sd_samplers_common.InterruptedException
-
- # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling,
- # so is_edit_model is set to False to support AND composition.
- is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0
-
- conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
- uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
-
- assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
-
- batch_size = len(conds_list)
- repeats = [len(conds_list[i]) for i in range(batch_size)]
-
- if shared.sd_model.model.conditioning_key == "crossattn-adm":
- image_uncond = torch.zeros_like(image_cond)
- make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm}
- else:
- image_uncond = image_cond
- if isinstance(uncond, dict):
- make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]}
- else:
- make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]}
-
- if not is_edit_model:
- x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
- sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
- image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond])
- else:
- x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x])
- sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma])
- image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)])
-
- denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond)
- cfg_denoiser_callback(denoiser_params)
- x_in = denoiser_params.x
- image_cond_in = denoiser_params.image_cond
- sigma_in = denoiser_params.sigma
- tensor = denoiser_params.text_cond
- uncond = denoiser_params.text_uncond
- skip_uncond = False
-
- # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
- if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model:
- skip_uncond = True
- x_in = x_in[:-batch_size]
- sigma_in = sigma_in[:-batch_size]
-
- self.padded_cond_uncond = False
- if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]:
- empty = shared.sd_model.cond_stage_model_empty_prompt
- num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]
-
- if num_repeats < 0:
- tensor = pad_cond(tensor, -num_repeats, empty)
- self.padded_cond_uncond = True
- elif num_repeats > 0:
- uncond = pad_cond(uncond, num_repeats, empty)
- self.padded_cond_uncond = True
-
- if tensor.shape[1] == uncond.shape[1] or skip_uncond:
- if is_edit_model:
- cond_in = catenate_conds([tensor, uncond, uncond])
- elif skip_uncond:
- cond_in = tensor
- else:
- cond_in = catenate_conds([tensor, uncond])
-
- if shared.batch_cond_uncond:
- x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
- else:
- x_out = torch.zeros_like(x_in)
- for batch_offset in range(0, x_out.shape[0], batch_size):
- a = batch_offset
- b = a + batch_size
- x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(subscript_cond(cond_in, a, b), image_cond_in[a:b]))
- else:
- x_out = torch.zeros_like(x_in)
- batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
- for batch_offset in range(0, tensor.shape[0], batch_size):
- a = batch_offset
- b = min(a + batch_size, tensor.shape[0])
-
- if not is_edit_model:
- c_crossattn = subscript_cond(tensor, a, b)
- else:
- c_crossattn = torch.cat([tensor[a:b]], uncond)
-
- x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
-
- if not skip_uncond:
- x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:]))
-
- denoised_image_indexes = [x[0][0] for x in conds_list]
- if skip_uncond:
- fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
- x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
-
- denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
- cfg_denoised_callback(denoised_params)
-
- devices.test_for_nans(x_out, "unet")
-
- if opts.live_preview_content == "Prompt":
- sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes]))
- elif opts.live_preview_content == "Negative prompt":
- sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
-
- if is_edit_model:
- denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
- elif skip_uncond:
- denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
- else:
- denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
-
- if self.mask is not None:
- denoised = self.init_latent * self.mask + self.nmask * denoised
-
- after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
- cfg_after_cfg_callback(after_cfg_callback_params)
- denoised = after_cfg_callback_params.x
-
- self.step += 1
- return denoised
-
-
-class TorchHijack:
- def __init__(self, sampler_noises):
- # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
- # implementation.
- self.sampler_noises = deque(sampler_noises)
-
- def __getattr__(self, item):
- if item == 'randn_like':
- return self.randn_like
-
- if hasattr(torch, item):
- return getattr(torch, item)
-
- raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")
-
- def randn_like(self, x):
- if self.sampler_noises:
- noise = self.sampler_noises.popleft()
- if noise.shape == x.shape:
- return noise
- return devices.randn_like(x)
-class KDiffusionSampler:
- def __init__(self, funcname, sd_model):
+class KDiffusionSampler(sd_samplers_common.Sampler):
+ def __init__(self, funcname, sd_model):
+ super().__init__(funcname)
+ self.extra_params = sampler_extra_params.get(funcname, [])
+ self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname)
denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
-
self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
- self.funcname = funcname
- self.func = funcname if callable(funcname) else getattr(k_diffusion.sampling, self.funcname)
- self.extra_params = sampler_extra_params.get(funcname, [])
- self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
- self.sampler_noises = None
- self.stop_at = None
- self.eta = None
- self.config = None # set by the function calling the constructor
- self.last_latent = None
- self.s_min_uncond = None
-
- self.conditioning_key = sd_model.model.conditioning_key
-
- def callback_state(self, d):
- step = d['i']
- latent = d["denoised"]
- if opts.live_preview_content == "Combined":
- sd_samplers_common.store_latent(latent)
- self.last_latent = latent
-
- if self.stop_at is not None and step > self.stop_at:
- raise sd_samplers_common.InterruptedException
-
- state.sampling_step = step
- shared.total_tqdm.update()
-
- def launch_sampling(self, steps, func):
- state.sampling_steps = steps
- state.sampling_step = 0
-
- try:
- return func()
- except RecursionError:
- print(
- 'Encountered RecursionError during sampling, returning last latent. '
- 'rho >5 with a polyexponential scheduler may cause this error. '
- 'You should try to use a smaller rho value instead.'
- )
- return self.last_latent
- except sd_samplers_common.InterruptedException:
- return self.last_latent
-
- def number_of_needed_noises(self, p):
- return p.steps
-
- def initialize(self, p):
- self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
- self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
- self.model_wrap_cfg.step = 0
- self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
- self.eta = p.eta if p.eta is not None else opts.eta_ancestral
- self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)
-
- k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
-
- extra_params_kwargs = {}
- for param_name in self.extra_params:
- if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
- extra_params_kwargs[param_name] = getattr(p, param_name)
-
- if 'eta' in inspect.signature(self.func).parameters:
- if self.eta != 1.0:
- p.extra_generation_params["Eta"] = self.eta
-
- extra_params_kwargs['eta'] = self.eta
-
- return extra_params_kwargs
+ self.model_wrap_cfg = CFGDenoiser(self.model_wrap, self)
def get_sigmas(self, p, steps):
discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
@@ -387,22 +115,12 @@ class KDiffusionSampler:
return sigmas
- def create_noise_sampler(self, x, sigmas, p):
- """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
- if shared.opts.no_dpmpp_sde_batch_determinism:
- return None
-
- from k_diffusion.sampling import BrownianTreeNoiseSampler
- sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
- current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]
- return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds)
-
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
sigmas = self.get_sigmas(p, steps)
-
sigma_sched = sigmas[steps - t_enc - 1:]
+
xi = x + noise * sigma_sched[0]
extra_params_kwargs = self.initialize(p)
@@ -451,12 +169,14 @@ class KDiffusionSampler:
extra_params_kwargs = self.initialize(p)
parameters = inspect.signature(self.func).parameters
+ if 'n' in parameters:
+ extra_params_kwargs['n'] = steps
+
if 'sigma_min' in parameters:
extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
- if 'n' in parameters:
- extra_params_kwargs['n'] = steps
- else:
+
+ if 'sigmas' in parameters:
extra_params_kwargs['sigmas'] = sigmas
if self.config.options.get('brownian_noise', False):
@@ -477,3 +197,4 @@ class KDiffusionSampler:
return samples
+
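Reviewer note: the reshuffled `sample()` logic above gates each kwarg on the target function's signature, since k-diffusion samplers disagree on whether they take `n`, `sigma_min`/`sigma_max`, or `sigmas`. A self-contained sketch of the pattern; `fake_sampler` is a stand-in, not a real k-diffusion function:
```python
import inspect

def fake_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None):
    return x

parameters = inspect.signature(fake_sampler).parameters
extra_params_kwargs = {}

# Only pass what the chosen function actually accepts.
if 'n' in parameters:
    extra_params_kwargs['n'] = 20
if 'sigma_min' in parameters:
    extra_params_kwargs['sigma_min'] = 0.03
    extra_params_kwargs['sigma_max'] = 14.6
if 'sigmas' in parameters:
    extra_params_kwargs['sigmas'] = [14.6, 7.0, 0.03, 0.0]

print(extra_params_kwargs)  # {'sigmas': [...]} -- only the accepted kwarg survives
```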
diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py
new file mode 100644
index 00000000..f61799a8
--- /dev/null
+++ b/modules/sd_samplers_timesteps.py
@@ -0,0 +1,151 @@
+import torch
+import inspect
+import sys
+from modules import devices, sd_samplers_common, sd_samplers_timesteps_impl
+from modules.sd_samplers_cfg_denoiser import CFGDenoiser
+
+from modules.shared import opts
+import modules.shared as shared
+
+samplers_timesteps = [
+ ('DDIM', sd_samplers_timesteps_impl.ddim, ['ddim'], {}),
+ ('PLMS', sd_samplers_timesteps_impl.plms, ['plms'], {}),
+ ('UniPC', sd_samplers_timesteps_impl.unipc, ['unipc'], {}),
+]
+
+
+samplers_data_timesteps = [
+ sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: CompVisSampler(funcname, model), aliases, options)
+ for label, funcname, aliases, options in samplers_timesteps
+]
+
+
+class CompVisTimestepsDenoiser(torch.nn.Module):
+ def __init__(self, model, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.inner_model = model
+
+ def forward(self, input, timesteps, **kwargs):
+ return self.inner_model.apply_model(input, timesteps, **kwargs)
+
+
+class CompVisTimestepsVDenoiser(torch.nn.Module):
+ def __init__(self, model, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.inner_model = model
+
+ def predict_eps_from_z_and_v(self, x_t, t, v):
+ return self.inner_model.sqrt_alphas_cumprod[t.to(torch.int), None, None, None] * v + self.inner_model.sqrt_one_minus_alphas_cumprod[t.to(torch.int), None, None, None] * x_t
+
+ def forward(self, input, timesteps, **kwargs):
+ model_output = self.inner_model.apply_model(input, timesteps, **kwargs)
+ e_t = self.predict_eps_from_z_and_v(input, timesteps, model_output)
+ return e_t
+
+
+class CFGDenoiserTimesteps(CFGDenoiser):
+
+ def __init__(self, model, sampler):
+ super().__init__(model, sampler)
+
+ self.alphas = model.inner_model.alphas_cumprod
+
+ def get_pred_x0(self, x_in, x_out, sigma):
+ ts = int(sigma.item())
+
+ s_in = x_in.new_ones([x_in.shape[0]])
+ a_t = self.alphas[ts].item() * s_in
+ sqrt_one_minus_at = (1 - a_t).sqrt()
+
+ pred_x0 = (x_in - sqrt_one_minus_at * x_out) / a_t.sqrt()
+
+ return pred_x0
+
+
+class CompVisSampler(sd_samplers_common.Sampler):
+ def __init__(self, funcname, sd_model):
+ super().__init__(funcname)
+
+ self.eta_option_field = 'eta_ddim'
+ self.eta_infotext_field = 'Eta DDIM'
+
+ denoiser = CompVisTimestepsVDenoiser if sd_model.parameterization == "v" else CompVisTimestepsDenoiser
+ self.model_wrap = denoiser(sd_model)
+ self.model_wrap_cfg = CFGDenoiserTimesteps(self.model_wrap, self)
+
+ def get_timesteps(self, p, steps):
+ discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
+ if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
+ discard_next_to_last_sigma = True
+ p.extra_generation_params["Discard penultimate sigma"] = True
+
+ steps += 1 if discard_next_to_last_sigma else 0
+
+ timesteps = torch.clip(torch.asarray(list(range(0, 1000, 1000 // steps)), device=devices.device) + 1, 0, 999)
+
+ return timesteps
+
+ def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+ steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
+
+ timesteps = self.get_timesteps(p, steps)
+ timesteps_sched = timesteps[:t_enc]
+
+ alphas_cumprod = shared.sd_model.alphas_cumprod
+ sqrt_alpha_cumprod = torch.sqrt(alphas_cumprod[timesteps[t_enc]])
+ sqrt_one_minus_alpha_cumprod = torch.sqrt(1 - alphas_cumprod[timesteps[t_enc]])
+
+ xi = x * sqrt_alpha_cumprod + noise * sqrt_one_minus_alpha_cumprod
+
+ extra_params_kwargs = self.initialize(p)
+ parameters = inspect.signature(self.func).parameters
+
+ if 'timesteps' in parameters:
+ extra_params_kwargs['timesteps'] = timesteps_sched
+ if 'is_img2img' in parameters:
+ extra_params_kwargs['is_img2img'] = True
+
+ self.model_wrap_cfg.init_latent = x
+ self.last_latent = x
+ extra_args = {
+ 'cond': conditioning,
+ 'image_cond': image_conditioning,
+ 'uncond': unconditional_conditioning,
+ 'cond_scale': p.cfg_scale,
+ 's_min_uncond': self.s_min_uncond
+ }
+
+ samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
+
+ if self.model_wrap_cfg.padded_cond_uncond:
+ p.extra_generation_params["Pad conds"] = True
+
+ return samples
+
+ def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
+ steps = steps or p.steps
+ timesteps = self.get_timesteps(p, steps)
+
+ extra_params_kwargs = self.initialize(p)
+ parameters = inspect.signature(self.func).parameters
+
+ if 'timesteps' in parameters:
+ extra_params_kwargs['timesteps'] = timesteps
+
+ self.last_latent = x
+ samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
+ 'cond': conditioning,
+ 'image_cond': image_conditioning,
+ 'uncond': unconditional_conditioning,
+ 'cond_scale': p.cfg_scale,
+ 's_min_uncond': self.s_min_uncond
+ }, disable=False, callback=self.callback_state, **extra_params_kwargs))
+
+ if self.model_wrap_cfg.padded_cond_uncond:
+ p.extra_generation_params["Pad conds"] = True
+
+ return samples
+
+
+sys.modules['modules.sd_samplers_compvis'] = sys.modules[__name__]
+VanillaStableDiffusionSampler = CompVisSampler # temp. compatibility with older extensions
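Reviewer note: in `sample_img2img` above, the init latent is pushed to the encode timestep with the standard DDPM forward process, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. A toy sketch; the linear schedule here is illustrative, not the model's actual alphas_cumprod:
```python
import torch

alphas_cumprod = torch.linspace(0.9999, 0.01, 1000)   # toy noise schedule
t = 400
x0 = torch.randn(1, 4, 8, 8)                          # init latent
eps = torch.randn_like(x0)

# Forward-noise x0 to timestep t in one shot.
xt = x0 * alphas_cumprod[t].sqrt() + eps * (1 - alphas_cumprod[t]).sqrt()
print(xt.shape)  # torch.Size([1, 4, 8, 8])
```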
diff --git a/modules/sd_samplers_timesteps_impl.py b/modules/sd_samplers_timesteps_impl.py
new file mode 100644
index 00000000..48d7e649
--- /dev/null
+++ b/modules/sd_samplers_timesteps_impl.py
@@ -0,0 +1,135 @@
+import torch
+import tqdm
+import k_diffusion.sampling
+import numpy as np
+
+from modules import shared
+from modules.models.diffusion.uni_pc import uni_pc
+
+
+@torch.no_grad()
+def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=0.0):
+ alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
+ alphas = alphas_cumprod[timesteps]
+ alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64)
+ sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
+ sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy()))
+
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ for i in tqdm.trange(len(timesteps) - 1, disable=disable):
+ index = len(timesteps) - 1 - i
+
+ e_t = model(x, timesteps[index].item() * s_in, **extra_args)
+
+ a_t = alphas[index].item() * s_in
+ a_prev = alphas_prev[index].item() * s_in
+ sigma_t = sigmas[index].item() * s_in
+ sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_in
+
+ pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
+ dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
+ noise = sigma_t * k_diffusion.sampling.torch.randn_like(x)
+ x = a_prev.sqrt() * pred_x0 + dir_xt + noise
+
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': 0, 'sigma_hat': 0, 'denoised': pred_x0})
+
+ return x
+
+
+@torch.no_grad()
+def plms(model, x, timesteps, extra_args=None, callback=None, disable=None):
+ alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
+ alphas = alphas_cumprod[timesteps]
+ alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64)
+ sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
+
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ old_eps = []
+
+ def get_x_prev_and_pred_x0(e_t, index):
+ # select parameters corresponding to the currently considered timestep
+ a_t = alphas[index].item() * s_in
+ a_prev = alphas_prev[index].item() * s_in
+ sqrt_one_minus_at = sqrt_one_minus_alphas[index].item() * s_in
+
+ # current prediction for x_0
+ pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
+
+ # direction pointing to x_t
+ dir_xt = (1. - a_prev).sqrt() * e_t
+ x_prev = a_prev.sqrt() * pred_x0 + dir_xt
+ return x_prev, pred_x0
+
+ for i in tqdm.trange(len(timesteps) - 1, disable=disable):
+ index = len(timesteps) - 1 - i
+ ts = timesteps[index].item() * s_in
+ t_next = timesteps[max(index - 1, 0)].item() * s_in
+
+ e_t = model(x, ts, **extra_args)
+
+ if len(old_eps) == 0:
+ # Pseudo Improved Euler (2nd order)
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
+ e_t_next = model(x_prev, t_next, **extra_args)
+ e_t_prime = (e_t + e_t_next) / 2
+ elif len(old_eps) == 1:
+ # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
+ e_t_prime = (3 * e_t - old_eps[-1]) / 2
+ elif len(old_eps) == 2:
+ # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
+ e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
+ else:
+ # 4th order Pseudo Linear Multistep (Adams-Bashforth)
+ e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
+
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
+
+ old_eps.append(e_t)
+ if len(old_eps) >= 4:
+ old_eps.pop(0)
+
+ x = x_prev
+
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': 0, 'sigma_hat': 0, 'denoised': pred_x0})
+
+ return x
+
+
+class UniPCCFG(uni_pc.UniPC):
+ def __init__(self, cfg_model, extra_args, callback, *args, **kwargs):
+ super().__init__(None, *args, **kwargs)
+
+ def after_update(x, model_x):
+ callback({'x': x, 'i': self.index, 'sigma': 0, 'sigma_hat': 0, 'denoised': model_x})
+ self.index += 1
+
+ self.cfg_model = cfg_model
+ self.extra_args = extra_args
+ self.callback = callback
+ self.index = 0
+ self.after_update = after_update
+
+ def get_model_input_time(self, t_continuous):
+ return (t_continuous - 1. / self.noise_schedule.total_N) * 1000.
+
+ def model(self, x, t):
+ t_input = self.get_model_input_time(t)
+
+ res = self.cfg_model(x, t_input, **self.extra_args)
+
+ return res
+
+
+def unipc(model, x, timesteps, extra_args=None, callback=None, disable=None, is_img2img=False):
+ alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
+
+ ns = uni_pc.NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
+ t_start = timesteps[-1] / 1000 + 1 / 1000 if is_img2img else None # this is likely off by a bit - if someone wants to fix it please by all means
+ unipc_sampler = UniPCCFG(model, extra_args, callback, ns, predict_x0=True, thresholding=False, variant=shared.opts.uni_pc_variant)
+ x = unipc_sampler.sample(x, steps=len(timesteps), t_start=t_start, skip_type=shared.opts.uni_pc_skip_type, method="multistep", order=shared.opts.uni_pc_order, lower_order_final=shared.opts.uni_pc_lower_order_final)
+
+ return x
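Reviewer note: for readers unfamiliar with the update rule in `ddim()` above, each step first recovers a clean-image estimate pred_x0 from the noise prediction, then re-noises it toward the previous timestep. A standalone sketch with illustrative alpha values; eta = 0 makes the step deterministic:
```python
import torch

a_t, a_prev, sigma_t = 0.5, 0.7, 0.0           # eta = 0 -> sigma_t = 0
x = torch.randn(1, 4, 8, 8)                    # current noisy latent
e_t = torch.randn_like(x)                      # model's noise prediction (stand-in)

# pred_x0 = (x - sqrt(1 - a_t) * e_t) / sqrt(a_t)
pred_x0 = (x - (1 - a_t) ** 0.5 * e_t) / a_t ** 0.5
# direction pointing back to x_t, then the DDIM step
dir_xt = (1 - a_prev - sigma_t ** 2) ** 0.5 * e_t
x_prev = a_prev ** 0.5 * pred_x0 + dir_xt + sigma_t * torch.randn_like(x)
print(x_prev.shape)
```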
diff --git a/modules/sd_vae.py b/modules/sd_vae.py
index 84271db0..5ac1ac31 100644
--- a/modules/sd_vae.py
+++ b/modules/sd_vae.py
@@ -1,6 +1,9 @@
import os
import collections
-from modules import paths, shared, devices, script_callbacks, sd_models
+from dataclasses import dataclass
+
+from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack
+
import glob
from copy import deepcopy
@@ -16,6 +19,7 @@ checkpoint_info = None
checkpoints_loaded = collections.OrderedDict()
+
def get_base_vae(model):
if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model:
return base_vae
@@ -96,27 +100,74 @@ def find_vae_near_checkpoint(checkpoint_file):
return None
-def resolve_vae(checkpoint_file):
- if shared.cmd_opts.vae_path is not None:
- return shared.cmd_opts.vae_path, 'from commandline argument'
+@dataclass
+class VaeResolution:
+ vae: str = None
+ source: str = None
+ resolved: bool = True
- is_automatic = shared.opts.sd_vae in {"Automatic", "auto"} # "auto" for people with old config
+ def tuple(self):
+ return self.vae, self.source
- vae_near_checkpoint = find_vae_near_checkpoint(checkpoint_file)
- if vae_near_checkpoint is not None and (shared.opts.sd_vae_as_default or is_automatic):
- return vae_near_checkpoint, 'found near the checkpoint'
+def is_automatic():
+ return shared.opts.sd_vae in {"Automatic", "auto"} # "auto" for people with old config
+
+
+def resolve_vae_from_setting() -> VaeResolution:
if shared.opts.sd_vae == "None":
- return None, None
+ return VaeResolution()
vae_from_options = vae_dict.get(shared.opts.sd_vae, None)
if vae_from_options is not None:
- return vae_from_options, 'specified in settings'
+ return VaeResolution(vae_from_options, 'specified in settings')
- if not is_automatic:
+ if not is_automatic():
print(f"Couldn't find VAE named {shared.opts.sd_vae}; using None instead")
- return None, None
+ return VaeResolution(resolved=False)
+
+
+def resolve_vae_from_user_metadata(checkpoint_file) -> VaeResolution:
+ metadata = extra_networks.get_user_metadata(checkpoint_file)
+ vae_metadata = metadata.get("vae", None)
+ if vae_metadata is not None and vae_metadata != "Automatic":
+ if vae_metadata == "None":
+ return VaeResolution()
+
+ vae_from_metadata = vae_dict.get(vae_metadata, None)
+ if vae_from_metadata is not None:
+ return VaeResolution(vae_from_metadata, "from user metadata")
+
+ return VaeResolution(resolved=False)
+
+
+def resolve_vae_near_checkpoint(checkpoint_file) -> VaeResolution:
+ vae_near_checkpoint = find_vae_near_checkpoint(checkpoint_file)
+ if vae_near_checkpoint is not None and (shared.opts.sd_vae_as_default or is_automatic()):
+ return VaeResolution(vae_near_checkpoint, 'found near the checkpoint')
+
+ return VaeResolution(resolved=False)
+
+
+def resolve_vae(checkpoint_file) -> VaeResolution:
+ if shared.cmd_opts.vae_path is not None:
+ return VaeResolution(shared.cmd_opts.vae_path, 'from commandline argument')
+
+ if shared.opts.sd_vae_overrides_per_model_preferences and not is_automatic():
+ return resolve_vae_from_setting()
+
+ res = resolve_vae_from_user_metadata(checkpoint_file)
+ if res.resolved:
+ return res
+
+ res = resolve_vae_near_checkpoint(checkpoint_file)
+ if res.resolved:
+ return res
+
+ res = resolve_vae_from_setting()
+
+ return res
def load_vae_dict(filename, map_location):
@@ -181,8 +232,6 @@ unspecified = object()
def reload_vae_weights(sd_model=None, vae_file=unspecified):
- from modules import lowvram, devices, sd_hijack
-
if not sd_model:
sd_model = shared.sd_model
@@ -190,7 +239,7 @@ def reload_vae_weights(sd_model=None, vae_file=unspecified):
checkpoint_file = checkpoint_info.filename
if vae_file == unspecified:
- vae_file, vae_source = resolve_vae(checkpoint_file)
+ vae_file, vae_source = resolve_vae(checkpoint_file).tuple()
else:
vae_source = "from function argument"
diff --git a/modules/sd_vae_approx.py b/modules/sd_vae_approx.py
index 86bd658a..3965e223 100644
--- a/modules/sd_vae_approx.py
+++ b/modules/sd_vae_approx.py
@@ -81,6 +81,6 @@ def cheap_approximation(sample):
coefs = torch.tensor(coeffs).to(sample.device)
- x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs)
+ x_sample = torch.einsum("...lxy,lr -> ...rxy", sample, coefs)
return x_sample
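Reviewer note: the einsum change above is what lets `cheap_approximation` accept batched latents: "lxy,lr -> rxy" only handles a single (l, x, y) sample, while the "..." in "...lxy,lr -> ...rxy" broadcasts over any leading batch dimensions. A quick demonstration with random tensors:
```python
import torch

sample = torch.randn(2, 4, 8, 8)     # batch of 2 latents, 4 channels each
coefs = torch.randn(4, 3)            # 4 latent channels -> 3 RGB channels

rgb = torch.einsum("...lxy,lr->...rxy", sample, coefs)
print(rgb.shape)  # torch.Size([2, 3, 8, 8])

single = torch.einsum("...lxy,lr->...rxy", sample[0], coefs)
print(single.shape)  # torch.Size([3, 8, 8]) -- the old single-sample case still works
```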
diff --git a/modules/sd_vae_taesd.py b/modules/sd_vae_taesd.py
index 5bf7c76e..808eb362 100644
--- a/modules/sd_vae_taesd.py
+++ b/modules/sd_vae_taesd.py
@@ -44,7 +44,17 @@ def decoder():
)
-class TAESD(nn.Module):
+def encoder():
+ return nn.Sequential(
+ conv(3, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 4),
+ )
+
+
+class TAESDDecoder(nn.Module):
latent_magnitude = 3
latent_shift = 0.5
@@ -55,21 +65,28 @@ class TAESD(nn.Module):
self.decoder.load_state_dict(
torch.load(decoder_path, map_location='cpu' if devices.device.type != 'cuda' else None))
- @staticmethod
- def unscale_latents(x):
- """[0, 1] -> raw latents"""
- return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude)
+
+class TAESDEncoder(nn.Module):
+ latent_magnitude = 3
+ latent_shift = 0.5
+
+ def __init__(self, encoder_path="taesd_encoder.pth"):
+ """Initialize pretrained TAESD on the given device from the given checkpoints."""
+ super().__init__()
+ self.encoder = encoder()
+ self.encoder.load_state_dict(
+ torch.load(encoder_path, map_location='cpu' if devices.device.type != 'cuda' else None))
def download_model(model_path, model_url):
if not os.path.exists(model_path):
os.makedirs(os.path.dirname(model_path), exist_ok=True)
- print(f'Downloading TAESD decoder to: {model_path}')
+ print(f'Downloading TAESD model to: {model_path}')
torch.hub.download_url_to_file(model_url, model_path)
-def model():
+def decoder_model():
model_name = "taesdxl_decoder.pth" if getattr(shared.sd_model, 'is_sdxl', False) else "taesd_decoder.pth"
loaded_model = sd_vae_taesd_models.get(model_name)
@@ -78,7 +95,7 @@ def model():
download_model(model_path, 'https://github.com/madebyollin/taesd/raw/main/' + model_name)
if os.path.exists(model_path):
- loaded_model = TAESD(model_path)
+ loaded_model = TAESDDecoder(model_path)
loaded_model.eval()
loaded_model.to(devices.device, devices.dtype)
sd_vae_taesd_models[model_name] = loaded_model
@@ -86,3 +103,22 @@ def model():
raise FileNotFoundError('TAESD model not found')
return loaded_model.decoder
+
+
+def encoder_model():
+ model_name = "taesdxl_encoder.pth" if getattr(shared.sd_model, 'is_sdxl', False) else "taesd_encoder.pth"
+ loaded_model = sd_vae_taesd_models.get(model_name)
+
+ if loaded_model is None:
+ model_path = os.path.join(paths_internal.models_path, "VAE-taesd", model_name)
+ download_model(model_path, 'https://github.com/madebyollin/taesd/raw/main/' + model_name)
+
+ if os.path.exists(model_path):
+ loaded_model = TAESDEncoder(model_path)
+ loaded_model.eval()
+ loaded_model.to(devices.device, devices.dtype)
+ sd_vae_taesd_models[model_name] = loaded_model
+ else:
+ raise FileNotFoundError('TAESD model not found')
+
+ return loaded_model.encoder
diff --git a/modules/shared.py b/modules/shared.py
index cec030f7..d9d01484 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -1,772 +1,52 @@
-import datetime
-import json
-import os
-import re
import sys
-import threading
-import time
-import logging
import gradio as gr
-import torch
-import tqdm
-import launch
-import modules.interrogate
-import modules.memmon
-import modules.styles
-import modules.devices as devices
-from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args
+from modules import shared_cmd_options, shared_gradio_themes, options, shared_items
from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
from ldm.models.diffusion.ddpm import LatentDiffusion
-from typing import Optional
+from modules import util
-log = logging.getLogger(__name__)
-
-demo = None
-
-parser = cmd_args.parser
-
-script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file))
-script_loading.preload_extensions(extensions_builtin_dir, parser)
-
-if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
- cmd_opts = parser.parse_args()
-else:
- cmd_opts, _ = parser.parse_known_args()
-
-
-restricted_opts = {
- "samples_filename_pattern",
- "directories_filename_pattern",
- "outdir_samples",
- "outdir_txt2img_samples",
- "outdir_img2img_samples",
- "outdir_extras_samples",
- "outdir_grids",
- "outdir_txt2img_grids",
- "outdir_save",
- "outdir_init_images"
-}
-
-# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json
-gradio_hf_hub_themes = [
- "gradio/glass",
- "gradio/monochrome",
- "gradio/seafoam",
- "gradio/soft",
- "freddyaboulton/dracula_revamped",
- "gradio/dracula_test",
- "abidlabs/dracula_test",
- "abidlabs/pakistan",
- "dawood/microsoft_windows",
- "ysharma/steampunk"
-]
-
-
-cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
-
-devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
- (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
-
-devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
-devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
-
-device = devices.device
-weight_load_location = None if cmd_opts.lowram else "cpu"
+cmd_opts = shared_cmd_options.cmd_opts
+parser = shared_cmd_options.parser
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
-xformers_available = False
-config_filename = cmd_opts.ui_settings_file
-
-os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
-hypernetworks = {}
-loaded_hypernetworks = []
-
-
-def reload_hypernetworks():
- from modules.hypernetworks import hypernetwork
- global hypernetworks
-
- hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
-
-
-class State:
- skipped = False
- interrupted = False
- job = ""
- job_no = 0
- job_count = 0
- processing_has_refined_job_count = False
- job_timestamp = '0'
- sampling_step = 0
- sampling_steps = 0
- current_latent = None
- current_image = None
- current_image_sampling_step = 0
- id_live_preview = 0
- textinfo = None
- time_start = None
- server_start = None
- _server_command_signal = threading.Event()
- _server_command: Optional[str] = None
-
- @property
- def need_restart(self) -> bool:
- # Compatibility getter for need_restart.
- return self.server_command == "restart"
-
- @need_restart.setter
- def need_restart(self, value: bool) -> None:
- # Compatibility setter for need_restart.
- if value:
- self.server_command = "restart"
-
- @property
- def server_command(self):
- return self._server_command
-
- @server_command.setter
- def server_command(self, value: Optional[str]) -> None:
- """
- Set the server command to `value` and signal that it's been set.
- """
- self._server_command = value
- self._server_command_signal.set()
-
- def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]:
- """
- Wait for server command to get set; return and clear the value and signal.
- """
- if self._server_command_signal.wait(timeout):
- self._server_command_signal.clear()
- req = self._server_command
- self._server_command = None
- return req
- return None
-
- def request_restart(self) -> None:
- self.interrupt()
- self.server_command = "restart"
- log.info("Received restart request")
-
- def skip(self):
- self.skipped = True
- log.info("Received skip request")
-
- def interrupt(self):
- self.interrupted = True
- log.info("Received interrupt request")
-
- def nextjob(self):
- if opts.live_previews_enable and opts.show_progress_every_n_steps == -1:
- self.do_set_current_image()
-
- self.job_no += 1
- self.sampling_step = 0
- self.current_image_sampling_step = 0
-
- def dict(self):
- obj = {
- "skipped": self.skipped,
- "interrupted": self.interrupted,
- "job": self.job,
- "job_count": self.job_count,
- "job_timestamp": self.job_timestamp,
- "job_no": self.job_no,
- "sampling_step": self.sampling_step,
- "sampling_steps": self.sampling_steps,
- }
-
- return obj
-
- def begin(self, job: str = "(unknown)"):
- self.sampling_step = 0
- self.job_count = -1
- self.processing_has_refined_job_count = False
- self.job_no = 0
- self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
- self.current_latent = None
- self.current_image = None
- self.current_image_sampling_step = 0
- self.id_live_preview = 0
- self.skipped = False
- self.interrupted = False
- self.textinfo = None
- self.time_start = time.time()
- self.job = job
- devices.torch_gc()
- log.info("Starting job %s", job)
-
- def end(self):
- duration = time.time() - self.time_start
- log.info("Ending job %s (%.2f seconds)", self.job, duration)
- self.job = ""
- self.job_count = 0
-
- devices.torch_gc()
-
- def set_current_image(self):
- """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
- if not parallel_processing_allowed:
- return
-
- if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.live_previews_enable and opts.show_progress_every_n_steps != -1:
- self.do_set_current_image()
-
- def do_set_current_image(self):
- if self.current_latent is None:
- return
-
- import modules.sd_samplers
- if opts.show_progress_grid:
- self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent))
- else:
- self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent))
-
- self.current_image_sampling_step = self.sampling_step
-
- def assign_current_image(self, image):
- self.current_image = image
- self.id_live_preview += 1
-
-
-state = State()
-state.server_start = time.time()
-
styles_filename = cmd_opts.styles_file
-prompt_styles = modules.styles.StyleDatabase(styles_filename)
-
-interrogator = modules.interrogate.InterrogateModels("interrogate")
-
-face_restorers = []
-
-
-class OptionInfo:
- def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''):
- self.default = default
- self.label = label
- self.component = component
- self.component_args = component_args
- self.onchange = onchange
- self.section = section
- self.refresh = refresh
-
- self.comment_before = comment_before
- """HTML text that will be added after label in UI"""
-
- self.comment_after = comment_after
- """HTML text that will be added before label in UI"""
-
- def link(self, label, url):
- self.comment_before += f"[<a href='{url}' target='_blank'>{label}</a>]"
- return self
-
- def js(self, label, js_func):
- self.comment_before += f"[<a onclick='{js_func}(); return false'>{label}</a>]"
- return self
-
- def info(self, info):
- self.comment_after += f"<span class='info'>({info})</span>"
- return self
-
- def html(self, html):
- self.comment_after += html
- return self
-
- def needs_restart(self):
- self.comment_after += " <span class='info'>(requires restart)</span>"
- return self
-
-
-
-
-def options_section(section_identifier, options_dict):
- for v in options_dict.values():
- v.section = section_identifier
-
- return options_dict
-
-
-def list_checkpoint_tiles():
- import modules.sd_models
- return modules.sd_models.checkpoint_tiles()
-
-
-def refresh_checkpoints():
- import modules.sd_models
- return modules.sd_models.list_models()
-
-
-def list_samplers():
- import modules.sd_samplers
- return modules.sd_samplers.all_samplers
-
-
+config_filename = cmd_opts.ui_settings_file
hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
-tab_names = []
-
-options_templates = {}
-
-options_templates.update(options_section(('saving-images', "Saving images/grids"), {
- "samples_save": OptionInfo(True, "Always save all generated images"),
- "samples_format": OptionInfo('png', 'File format for images'),
- "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
- "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
-
- "grid_save": OptionInfo(True, "Always save all generated image grids"),
- "grid_format": OptionInfo('png', 'File format for grids'),
- "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
- "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
- "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
- "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
- "n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
- "font": OptionInfo("", "Font for image grids that have text"),
- "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}),
- "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}),
- "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}),
-
- "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
- "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
- "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
- "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
- "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
- "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"),
- "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"),
- "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
- "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"),
- "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"),
- "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number),
- "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number),
- "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"),
-
- "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
- "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"),
- "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
- "save_init_img": OptionInfo(False, "Save init images when using img2img"),
-
- "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"),
- "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"),
-
-}))
-
-options_templates.update(options_section(('saving-paths', "Paths for saving"), {
- "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
- "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
- "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
- "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
- "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
- "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
- "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
- "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
- "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs),
-}))
-
-options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
- "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
- "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
- "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
- "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
- "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
-}))
-
-options_templates.update(options_section(('upscaling', "Upscaling"), {
- "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"),
- "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"),
- "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
- "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
-}))
-
-options_templates.update(options_section(('face-restoration', "Face restoration"), {
- "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
- "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"),
- "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
-}))
-
-options_templates.update(options_section(('system', "System"), {
- "show_warnings": OptionInfo(False, "Show warnings in console.").needs_restart(),
- "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_restart(),
- "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"),
- "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
- "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
- "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."),
- "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""),
- "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"),
-}))
-
-options_templates.update(options_section(('training', "Training"), {
- "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
- "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
- "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
- "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
- "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
- "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
- "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
- "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
- "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
- "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."),
- "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."),
- "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
-}))
-options_templates.update(options_section(('sd', "Stable Diffusion"), {
- "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
- "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
- "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
- "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
- "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
- "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
- "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}),
- "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
- "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"),
- "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill image's transparent parts with this color.", ui_components.FormColorPicker, {}),
- "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
- "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
- "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
- "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
- "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
- "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
- "auto_vae_precision": OptionInfo(True, "Automaticlly revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"),
- "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"),
-}))
-
-options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
- "sdxl_crop_top": OptionInfo(0, "crop top coordinate"),
- "sdxl_crop_left": OptionInfo(0, "crop left coordinate"),
- "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"),
- "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"),
-}))
-
-options_templates.update(options_section(('optimizations', "Optimizations"), {
- "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
- "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
- "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
- "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
- "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
- "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
- "experimental_persistent_cond_cache": OptionInfo(False, "persistent cond cache").info("Experimental, keep cond caches across jobs, reduce overhead."),
-}))
-
-options_templates.update(options_section(('compatibility', "Compatibility"), {
- "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
- "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
- "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
- "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."),
- "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."),
- "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."),
-}))
-
-options_templates.update(options_section(('interrogate', "Interrogate Options"), {
- "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"),
- "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"),
- "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
- "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
- "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
- "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"),
- "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types),
- "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
- "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"),
- "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"),
- "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"),
- "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"),
-}))
-
-options_templates.update(options_section(('extra_networks', "Extra Networks"), {
- "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."),
- "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'),
- "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}),
- "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"),
- "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"),
- "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"),
- "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"),
- "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
- "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_restart(),
- "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"),
- "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"),
- "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks),
-}))
-
-options_templates.update(options_section(('ui', "User interface"), {
- "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_restart(),
- "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_restart(),
- "img2img_editor_height": OptionInfo(720, "img2img: height of image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_restart(),
- "return_grid": OptionInfo(True, "Show grid in results for web"),
- "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"),
- "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
- "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
- "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
- "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
- "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
- "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
- "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"),
- "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"),
- "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
- "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_restart(),
- "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_restart(),
- "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
- "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
- "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"),
- "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"),
- "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_restart(),
- "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(),
- "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(),
- "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_restart(),
- "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires sampler selection").needs_restart(),
- "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_restart(),
- "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_restart(),
-}))
-
-options_templates.update(options_section(('infotext', "Infotext"), {
- "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
- "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
- "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"),
- "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"),
- "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"),
- "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""<ul style='margin-left: 1.5em'>
-<li>Ignore: keep prompt and styles dropdown as it is.</li>
-<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
-<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
-<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
-</ul>"""),
-
-}))
-
-options_templates.update(options_section(('ui', "Live previews"), {
- "show_progressbar": OptionInfo(True, "Show progressbar"),
- "live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
- "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}),
- "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
- "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"),
- "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"),
- "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
- "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"),
-}))
-
-options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
- "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_restart(),
- "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"),
- "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"),
- "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
- 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"),
- 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"),
- 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise schedule"),
- 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a more steep noise schedule (decreases faster)"),
- 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"),
- 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
- 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}),
- 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}),
- 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"),
- 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"),
-}))
-
-options_templates.update(options_section(('postprocessing', "Postprocessing"), {
- 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
- 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
- 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
-}))
-
-options_templates.update(options_section((None, "Hidden options"), {
- "disabled_extensions": OptionInfo([], "Disable these extensions"),
- "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}),
- "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"),
- "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"),
-}))
-
-
-options_templates.update()
-
-
-class Options:
- data = None
- data_labels = options_templates
- typemap = {int: float}
-
- def __init__(self):
- self.data = {k: v.default for k, v in self.data_labels.items()}
-
- def __setattr__(self, key, value):
- if self.data is not None:
- if key in self.data or key in self.data_labels:
- assert not cmd_opts.freeze_settings, "changing settings is disabled"
-
- info = opts.data_labels.get(key, None)
- comp_args = info.component_args if info else None
- if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
- raise RuntimeError(f"not possible to set {key} because it is restricted")
-
- if cmd_opts.hide_ui_dir_config and key in restricted_opts:
- raise RuntimeError(f"not possible to set {key} because it is restricted")
-
- self.data[key] = value
- return
-
- return super(Options, self).__setattr__(key, value)
-
- def __getattr__(self, item):
- if self.data is not None:
- if item in self.data:
- return self.data[item]
-
- if item in self.data_labels:
- return self.data_labels[item].default
-
- return super(Options, self).__getattribute__(item)
-
- def set(self, key, value):
- """sets an option and calls its onchange callback, returning True if the option changed and False otherwise"""
-
- oldval = self.data.get(key, None)
- if oldval == value:
- return False
-
- try:
- setattr(self, key, value)
- except RuntimeError:
- return False
-
- if self.data_labels[key].onchange is not None:
- try:
- self.data_labels[key].onchange()
- except Exception as e:
- errors.display(e, f"changing setting {key} to {value}")
- setattr(self, key, oldval)
- return False
-
- return True
-
- def get_default(self, key):
- """returns the default value for the key"""
-
- data_label = self.data_labels.get(key)
- if data_label is None:
- return None
-
- return data_label.default
-
- def save(self, filename):
- assert not cmd_opts.freeze_settings, "saving settings is disabled"
-
- with open(filename, "w", encoding="utf8") as file:
- json.dump(self.data, file, indent=4)
-
- def same_type(self, x, y):
- if x is None or y is None:
- return True
-
- type_x = self.typemap.get(type(x), type(x))
- type_y = self.typemap.get(type(y), type(y))
-
- return type_x == type_y
-
- def load(self, filename):
- with open(filename, "r", encoding="utf8") as file:
- self.data = json.load(file)
-
- # 1.1.1 quicksettings list migration
- if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
- self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
-
- # 1.4.0 ui_reorder
- if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data:
- self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]
-
- bad_settings = 0
- for k, v in self.data.items():
- info = self.data_labels.get(k, None)
- if info is not None and not self.same_type(info.default, v):
- print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
- bad_settings += 1
-
- if bad_settings > 0:
- print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
-
- def onchange(self, key, func, call=True):
- item = self.data_labels.get(key)
- item.onchange = func
-
- if call:
- func()
-
- def dumpjson(self):
- d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
- d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
- d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
- return json.dumps(d)
-
- def add_option(self, key, info):
- self.data_labels[key] = info
-
- def reorder(self):
- """reorder settings so that all items related to section always go together"""
-
- section_ids = {}
- settings_items = self.data_labels.items()
- for _, item in settings_items:
- if item.section not in section_ids:
- section_ids[item.section] = len(section_ids)
-
- self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
-
- def cast_value(self, key, value):
- """casts an arbitrary to the same type as this setting's value with key
- Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
- """
-
- if value is None:
- return None
-
- default_value = self.data_labels[key].default
- if default_value is None:
- default_value = getattr(self, key, None)
- if default_value is None:
- return None
-
- expected_type = type(default_value)
- if expected_type == bool and value == "False":
- value = False
- else:
- value = expected_type(value)
-
- return value
+demo = None
+device = None
-opts = Options()
-if os.path.exists(config_filename):
- opts.load(config_filename)
+weight_load_location = None
+xformers_available = False
-class Shared(sys.modules[__name__].__class__):
- """
- this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than
- at program startup.
- """
+hypernetworks = {}
- sd_model_val = None
+loaded_hypernetworks = []
- @property
- def sd_model(self):
- import modules.sd_models
+state = None
- return modules.sd_models.model_data.get_sd_model()
+prompt_styles = None
- @sd_model.setter
- def sd_model(self, value):
- import modules.sd_models
+interrogator = None
- modules.sd_models.model_data.set_sd_model(value)
+face_restorers = []
+options_templates = None
+opts = None
+restricted_opts = None
-sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead
-sys.modules[__name__].__class__ = Shared
+sd_model: LatentDiffusion = None
settings_components = None
"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings"""
+tab_names = []
+
latent_upscale_default_mode = "Latent"
latent_upscale_modes = {
"Latent": {"mode": "bilinear", "antialias": False},
@@ -785,108 +65,24 @@ progress_print_out = sys.stdout
gradio_theme = gr.themes.Base()
+total_tqdm = None
-def reload_gradio_theme(theme_name=None):
- global gradio_theme
- if not theme_name:
- theme_name = opts.gradio_theme
-
- default_theme_args = dict(
- font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'],
- font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],
- )
-
- if theme_name == "Default":
- gradio_theme = gr.themes.Default(**default_theme_args)
- else:
- try:
- gradio_theme = gr.themes.ThemeClass.from_hub(theme_name)
- except Exception as e:
- errors.display(e, "changing gradio theme")
- gradio_theme = gr.themes.Default(**default_theme_args)
-
-
-
-class TotalTQDM:
- def __init__(self):
- self._tqdm = None
-
- def reset(self):
- self._tqdm = tqdm.tqdm(
- desc="Total progress",
- total=state.job_count * state.sampling_steps,
- position=1,
- file=progress_print_out
- )
-
- def update(self):
- if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
- return
- if self._tqdm is None:
- self.reset()
- self._tqdm.update()
-
- def updateTotal(self, new_total):
- if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
- return
- if self._tqdm is None:
- self.reset()
- self._tqdm.total = new_total
-
- def clear(self):
- if self._tqdm is not None:
- self._tqdm.refresh()
- self._tqdm.close()
- self._tqdm = None
-
-
-total_tqdm = TotalTQDM()
-
-mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
-mem_mon.start()
-
-
-def natural_sort_key(s, regex=re.compile('([0-9]+)')):
- return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)]
-
-
-def listfiles(dirname):
- filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")]
- return [file for file in filenames if os.path.isfile(file)]
-
-
-def html_path(filename):
- return os.path.join(script_path, "html", filename)
-
-
-def html(filename):
- path = html_path(filename)
-
- if os.path.exists(path):
- with open(path, encoding="utf8") as file:
- return file.read()
-
- return ""
-
-
-def walk_files(path, allowed_extensions=None):
- if not os.path.exists(path):
- return
-
- if allowed_extensions is not None:
- allowed_extensions = set(allowed_extensions)
+mem_mon = None
- items = list(os.walk(path, followlinks=True))
- items = sorted(items, key=lambda x: natural_sort_key(x[0]))
+options_section = options.options_section
+OptionInfo = options.OptionInfo
+OptionHTML = options.OptionHTML
- for root, _, files in items:
- for filename in sorted(files, key=natural_sort_key):
- if allowed_extensions is not None:
- _, ext = os.path.splitext(filename)
- if ext not in allowed_extensions:
- continue
+natural_sort_key = util.natural_sort_key
+listfiles = util.listfiles
+html_path = util.html_path
+html = util.html
+walk_files = util.walk_files
+ldm_print = util.ldm_print
- if not opts.list_hidden_files and ("/." in root or "\\." in root):
- continue
+reload_gradio_theme = shared_gradio_themes.reload_gradio_theme
- yield os.path.join(root, filename)
+list_checkpoint_tiles = shared_items.list_checkpoint_tiles
+refresh_checkpoints = shared_items.refresh_checkpoints
+list_samplers = shared_items.list_samplers
+reload_hypernetworks = shared_items.reload_hypernetworks
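
Reviewer note: after this rewrite `modules/shared.py` keeps its old public names but only forwards them, so `from modules.shared import natural_sort_key` and friends keep working for extensions while the implementations live in `modules/util.py` and the other new modules. For reference, a self-contained sketch of the sort key that moved, copied from the deleted implementation above:

```python
import re

def natural_sort_key(s, regex=re.compile('([0-9]+)')):
    # split into digit/non-digit runs; digit runs compare numerically
    return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)]

files = ["img10.png", "img2.png", "IMG1.png"]
print(sorted(files, key=natural_sort_key))  # ['IMG1.png', 'img2.png', 'img10.png']
```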
diff --git a/modules/shared_cmd_options.py b/modules/shared_cmd_options.py
new file mode 100644
index 00000000..af24938b
--- /dev/null
+++ b/modules/shared_cmd_options.py
@@ -0,0 +1,18 @@
+import os
+
+import launch
+from modules import cmd_args, script_loading
+from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
+
+parser = cmd_args.parser
+
+script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file))
+script_loading.preload_extensions(extensions_builtin_dir, parser)
+
+if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
+ cmd_opts = parser.parse_args()
+else:
+ cmd_opts, _ = parser.parse_known_args()
+
+
+cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
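
Reviewer note: a hedged sketch of what the `IGNORE_CMD_ARGS_ERRORS` branch buys you; the parser and flags below are illustrative, not the real webui argument set:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--listen", action="store_true")

argv = ["--listen", "--flag-from-a-newer-version"]

# tolerant mode (IGNORE_CMD_ARGS_ERRORS set): unknown flags are collected, not fatal
cmd_opts, unknown = parser.parse_known_args(argv)
print(cmd_opts.listen, unknown)  # True ['--flag-from-a-newer-version']

# strict mode (default): parser.parse_args(argv) would exit with
# "error: unrecognized arguments: --flag-from-a-newer-version"
```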
diff --git a/modules/shared_gradio_themes.py b/modules/shared_gradio_themes.py
new file mode 100644
index 00000000..485e89d5
--- /dev/null
+++ b/modules/shared_gradio_themes.py
@@ -0,0 +1,66 @@
+import os
+
+import gradio as gr
+
+from modules import errors, shared
+from modules.paths_internal import script_path
+
+
+# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json
+gradio_hf_hub_themes = [
+ "gradio/base",
+ "gradio/glass",
+ "gradio/monochrome",
+ "gradio/seafoam",
+ "gradio/soft",
+ "gradio/dracula_test",
+ "abidlabs/dracula_test",
+ "abidlabs/Lime",
+ "abidlabs/pakistan",
+ "Ama434/neutral-barlow",
+ "dawood/microsoft_windows",
+ "finlaymacklon/smooth_slate",
+ "Franklisi/darkmode",
+ "freddyaboulton/dracula_revamped",
+ "freddyaboulton/test-blue",
+ "gstaff/xkcd",
+ "Insuz/Mocha",
+ "Insuz/SimpleIndigo",
+ "JohnSmith9982/small_and_pretty",
+ "nota-ai/theme",
+ "nuttea/Softblue",
+ "ParityError/Anime",
+ "reilnuud/polite",
+ "remilia/Ghostly",
+ "rottenlittlecreature/Moon_Goblin",
+ "step-3-profit/Midnight-Deep",
+ "Taithrah/Minimal",
+ "ysharma/huggingface",
+ "ysharma/steampunk"
+]
+
+
+def reload_gradio_theme(theme_name=None):
+ if not theme_name:
+ theme_name = shared.opts.gradio_theme
+
+ default_theme_args = dict(
+ font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'],
+ font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],
+ )
+
+ if theme_name == "Default":
+ shared.gradio_theme = gr.themes.Default(**default_theme_args)
+ else:
+ try:
+ theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes')
+ theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json')
+ if shared.opts.gradio_themes_cache and os.path.exists(theme_cache_path):
+ shared.gradio_theme = gr.themes.ThemeClass.load(theme_cache_path)
+ else:
+ os.makedirs(theme_cache_dir, exist_ok=True)
+ shared.gradio_theme = gr.themes.ThemeClass.from_hub(theme_name)
+ shared.gradio_theme.dump(theme_cache_path)
+ except Exception as e:
+ errors.display(e, "changing gradio theme")
+ shared.gradio_theme = gr.themes.Default(**default_theme_args)
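
Reviewer note: compared with the deleted version in `shared.py`, `reload_gradio_theme` now also snapshots hub themes to disk so later startups don't need network access. A condensed sketch of the cache-first logic, reusing the same `ThemeClass` calls as the code above (paths and the `use_cache` flag are illustrative):

```python
import os
import gradio as gr

def load_theme_cached(theme_name, cache_dir="tmp/gradio_themes", use_cache=True):
    # cache-first: read the dumped JSON snapshot if present,
    # otherwise fetch from the Hugging Face hub and snapshot it
    path = os.path.join(cache_dir, theme_name.replace("/", "_") + ".json")
    if use_cache and os.path.exists(path):
        return gr.themes.ThemeClass.load(path)
    os.makedirs(cache_dir, exist_ok=True)
    theme = gr.themes.ThemeClass.from_hub(theme_name)
    theme.dump(path)
    return theme
```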
diff --git a/modules/shared_init.py b/modules/shared_init.py
new file mode 100644
index 00000000..d3fb687e
--- /dev/null
+++ b/modules/shared_init.py
@@ -0,0 +1,49 @@
+import os
+
+import torch
+
+from modules import shared
+from modules.shared import cmd_opts
+
+
+def initialize():
+ """Initializes fields inside the shared module in a controlled manner.
+
+ Should be called early because some other modules you can import might need these fields to already be set.
+ """
+
+ os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)
+
+ from modules import options, shared_options
+ shared.options_templates = shared_options.options_templates
+ shared.opts = options.Options(shared_options.options_templates, shared_options.restricted_opts)
+ shared.restricted_opts = shared_options.restricted_opts
+ if os.path.exists(shared.config_filename):
+ shared.opts.load(shared.config_filename)
+
+ from modules import devices
+ devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
+ (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
+
+ devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
+ devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
+
+ shared.device = devices.device
+ shared.weight_load_location = None if cmd_opts.lowram else "cpu"
+
+ from modules import shared_state
+ shared.state = shared_state.State()
+
+ from modules import styles
+ shared.prompt_styles = styles.StyleDatabase(shared.styles_filename)
+
+ from modules import interrogate
+ shared.interrogator = interrogate.InterrogateModels("interrogate")
+
+ from modules import shared_total_tqdm
+ shared.total_tqdm = shared_total_tqdm.TotalTQDM()
+
+ from modules import memmon, devices
+ shared.mem_mon = memmon.MemUsageMonitor("MemMon", devices.device, shared.opts)
+ shared.mem_mon.start()
+
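Reviewer note: the ordering constraint in the docstring is the whole point of this module. A hedged sketch of the intended startup sequence (the call site is illustrative; in the real codebase `initialize()` is invoked from the startup path, not inline like this):

```python
from modules import shared_init

shared_init.initialize()  # populates shared.opts, shared.state, devices, ...

# only now is it safe to import modules that read these fields at import time
from modules import shared
assert shared.opts is not None and shared.state is not None
```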
diff --git a/modules/shared_items.py b/modules/shared_items.py
index 89792e88..e4ec40a8 100644
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -1,3 +1,6 @@
+import sys
+
+from modules.shared_cmd_options import cmd_opts
def realesrgan_models_names():
@@ -41,6 +44,28 @@ def refresh_unet_list():
modules.sd_unet.list_unets()
+def list_checkpoint_tiles():
+ import modules.sd_models
+ return modules.sd_models.checkpoint_tiles()
+
+
+def refresh_checkpoints():
+ import modules.sd_models
+ return modules.sd_models.list_models()
+
+
+def list_samplers():
+ import modules.sd_samplers
+ return modules.sd_samplers.all_samplers
+
+
+def reload_hypernetworks():
+ from modules.hypernetworks import hypernetwork
+ from modules import shared
+
+ shared.hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)
+
+
ui_reorder_categories_builtin_items = [
"inpaint",
"sampler",
@@ -67,3 +92,27 @@ def ui_reorder_categories():
yield from sections
yield "scripts"
+
+
+class Shared(sys.modules[__name__].__class__):
+ """
+ this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than
+ at program startup.
+ """
+
+ sd_model_val = None
+
+ @property
+ def sd_model(self):
+ import modules.sd_models
+
+ return modules.sd_models.model_data.get_sd_model()
+
+ @sd_model.setter
+ def sd_model(self, value):
+ import modules.sd_models
+
+ modules.sd_models.model_data.set_sd_model(value)
+
+
+sys.modules['modules.shared'].__class__ = Shared
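
Reviewer note: the `__class__` assignment works because attribute lookup on a module goes through its type, so swapping in a `ModuleType` subclass turns `shared.sd_model` into a property that loads the checkpoint on first access. A self-contained sketch of the trick (all names illustrative):

```python
import sys
import types

mod = types.ModuleType("demo_shared")
sys.modules["demo_shared"] = mod

class LazyModule(types.ModuleType):
    _model = None

    @property
    def sd_model(self):
        # runs on attribute access, so the expensive load is deferred
        if LazyModule._model is None:
            print("loading model on first access")
            LazyModule._model = object()  # stand-in for the real checkpoint load
        return LazyModule._model

mod.__class__ = LazyModule

import demo_shared
demo_shared.sd_model  # prints once, then the result is cached
demo_shared.sd_model  # silent
```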
diff --git a/modules/shared_options.py b/modules/shared_options.py
new file mode 100644
index 00000000..7468bc81
--- /dev/null
+++ b/modules/shared_options.py
@@ -0,0 +1,316 @@
+import gradio as gr
+
+from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes
+from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
+from modules.shared_cmd_options import cmd_opts
+from modules.options import options_section, OptionInfo, OptionHTML
+
+options_templates = {}
+hide_dirs = shared.hide_dirs
+
+restricted_opts = {
+ "samples_filename_pattern",
+ "directories_filename_pattern",
+ "outdir_samples",
+ "outdir_txt2img_samples",
+ "outdir_img2img_samples",
+ "outdir_extras_samples",
+ "outdir_grids",
+ "outdir_txt2img_grids",
+ "outdir_save",
+ "outdir_init_images"
+}
+
+options_templates.update(options_section(('saving-images', "Saving images/grids"), {
+ "samples_save": OptionInfo(True, "Always save all generated images"),
+ "samples_format": OptionInfo('png', 'File format for images'),
+ "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+ "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
+
+ "grid_save": OptionInfo(True, "Always save all generated image grids"),
+ "grid_format": OptionInfo('png', 'File format for grids'),
+ "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
+ "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
+ "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
+ "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+ "n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
+ "font": OptionInfo("", "Font for image grids that have text"),
+ "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}),
+ "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}),
+ "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}),
+
+ "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
+ "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
+ "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
+ "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
+ "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
+ "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"),
+ "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"),
+ "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
+ "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"),
+ "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"),
+ "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number),
+ "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number),
+ "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"),
+
+ "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
+ "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"),
+ "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
+ "save_init_img": OptionInfo(False, "Save init images when using img2img"),
+
+ "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"),
+ "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"),
+
+ "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that has been interrupted in mid-generation; even if not saved, they will still show up in webui output."),
+}))
+
+options_templates.update(options_section(('saving-paths', "Paths for saving"), {
+ "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
+ "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
+ "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
+ "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
+ "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
+ "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
+ "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
+ "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
+ "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs),
+}))
+
+options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
+ "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
+ "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
+ "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
+ "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+ "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
+}))
+
+options_templates.update(options_section(('upscaling', "Upscaling"), {
+ "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"),
+ "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"),
+ "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
+ "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}),
+}))
+
+options_templates.update(options_section(('face-restoration', "Face restoration"), {
+ "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}),
+ "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"),
+ "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
+}))
+
+options_templates.update(options_section(('system', "System"), {
+ "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}),
+ "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(),
+ "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(),
+ "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"),
+ "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
+ "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
+ "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."),
+ "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""),
+ "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"),
+ "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."),
+}))
+
+options_templates.update(options_section(('training', "Training"), {
+ "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
+ "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
+ "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
+ "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
+ "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
+ "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
+ "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
+ "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
+ "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
+ "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."),
+ "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."),
+ "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
+}))
+
+options_templates.update(options_section(('sd', "Stable Diffusion"), {
+ "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles()}, refresh=shared_items.refresh_checkpoints),
+ "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
+ "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"),
+ "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"),
+ "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
+ "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_reload_ui(),
+ "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
+ "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
+ "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
+ "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
+ "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
+ "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"),
+}))
+
+options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
+ "sdxl_crop_top": OptionInfo(0, "crop top coordinate"),
+ "sdxl_crop_left": OptionInfo(0, "crop left coordinate"),
+ "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"),
+ "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"),
+}))
+
+options_templates.update(options_section(('vae', "VAE"), {
+ "sd_vae_explanation": OptionHTML("""
+<abbr title='Variational autoencoder'>VAE</abbr> is a neural network that transforms a standard <abbr title='red/green/blue'>RGB</abbr>
+image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling
+(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished.
+For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling.
+"""),
+ "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
+ "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
+ "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"),
+ "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"),
+ "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"),
+ "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"),
+}))
+
+options_templates.update(options_section(('img2img', "img2img"), {
+ "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+ "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}),
+ "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
+ "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"),
+ "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}),
+ "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(),
+ "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(),
+ "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(),
+ "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(),
+ "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"),
+ "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
+}))
+
+options_templates.update(options_section(('optimizations', "Optimizations"), {
+ "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
+ "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
+ "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
+ "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
+ "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
+ "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
+ "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"),
+}))
+
+options_templates.update(options_section(('compatibility', "Compatibility"), {
+ "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
+ "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
+ "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
+ "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."),
+ "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."),
+ "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."),
+}))
+
+options_templates.update(options_section(('interrogate', "Interrogate"), {
+ "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"),
+ "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"),
+ "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
+ "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
+ "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
+ "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"),
+ "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": interrogate.category_types()}, refresh=interrogate.category_types),
+ "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
+ "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"),
+ "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"),
+ "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"),
+ "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"),
+}))
+
+options_templates.update(options_section(('extra_networks', "Extra Networks"), {
+ "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."),
+ "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'),
+ "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}),
+ "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"),
+ "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"),
+ "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"),
+ "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"),
+ "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
+ "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(),
+ "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"),
+ "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"),
+ "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks),
+}))
+
+options_templates.update(options_section(('ui', "User interface"), {
+ "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(),
+ "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the <a href='https://huggingface.co/spaces/gradio/theme-gallery'>gallery</a>.").needs_reload_ui(),
+ "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"),
+ "return_grid": OptionInfo(True, "Show grid in results for web"),
+ "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
+ "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
+ "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
+ "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
+ "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
+ "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"),
+ "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"),
+ "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
+ "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(),
+ "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(),
+ "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+ "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
+ "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"),
+ "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"),
+ "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(),
+ "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
+ "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
+ "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(),
+ "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(),
+ "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(),
+ "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(),
+}))
+
+options_templates.update(options_section(('infotext', "Infotext"), {
+ "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
+ "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
+ "add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"),
+ "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"),
+ "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"),
+ "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""<ul style='margin-left: 1.5em'>
+<li>Ignore: keep the prompt and the styles dropdown as they are.</li>
+<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
+<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
+<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
+</ul>"""),
+}))
+
+options_templates.update(options_section(('ui', "Live previews"), {
+ "show_progressbar": OptionInfo(True, "Show progressbar"),
+ "live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
+ "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}),
+ "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
+ "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"),
+ "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"),
+ "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
+ "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"),
+}))
+
+options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
+ "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in shared_items.list_samplers()]}).needs_reload_ui(),
+ "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"),
+ "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"),
+ "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
+ 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}).info('amount of stochasticity; only applies to Euler, Heun, and DPM2'),
+ 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}).info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'),
+ 's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}).info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"),
+ 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}).info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'),
+ 'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"),
+ 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"),
+ 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"),
+ 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"),
+ 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"),
+ 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
+ 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}),
+ 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}),
+ 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"),
+ 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"),
+}))
+
+options_templates.update(options_section(('postprocessing', "Postprocessing"), {
+ 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
+ 'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
+ 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
+}))
+
+options_templates.update(options_section((None, "Hidden options"), {
+ "disabled_extensions": OptionInfo([], "Disable these extensions"),
+ "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}),
+ "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"),
+ "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"),
+}))
+
diff --git a/modules/shared_state.py b/modules/shared_state.py new file mode 100644 index 00000000..3dc9c788 --- /dev/null +++ b/modules/shared_state.py @@ -0,0 +1,159 @@ +import datetime
+import logging
+import threading
+import time
+
+from modules import errors, shared, devices
+from typing import Optional
+
+log = logging.getLogger(__name__)
+
+
+class State:
+ skipped = False
+ interrupted = False
+ job = ""
+ job_no = 0
+ job_count = 0
+ processing_has_refined_job_count = False
+ job_timestamp = '0'
+ sampling_step = 0
+ sampling_steps = 0
+ current_latent = None
+ current_image = None
+ current_image_sampling_step = 0
+ id_live_preview = 0
+ textinfo = None
+ time_start = None
+ server_start = None
+ _server_command_signal = threading.Event()
+ _server_command: Optional[str] = None
+
+ def __init__(self):
+ self.server_start = time.time()
+
+ @property
+ def need_restart(self) -> bool:
+ # Compatibility getter for need_restart.
+ return self.server_command == "restart"
+
+ @need_restart.setter
+ def need_restart(self, value: bool) -> None:
+ # Compatibility setter for need_restart.
+ if value:
+ self.server_command = "restart"
+
+ @property
+ def server_command(self):
+ return self._server_command
+
+ @server_command.setter
+ def server_command(self, value: Optional[str]) -> None:
+ """
+ Set the server command to `value` and signal that it's been set.
+ """
+ self._server_command = value
+ self._server_command_signal.set()
+
+ def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]:
+ """
+ Wait for server command to get set; return and clear the value and signal.
+ """
+ if self._server_command_signal.wait(timeout):
+ self._server_command_signal.clear()
+ req = self._server_command
+ self._server_command = None
+ return req
+ return None
+
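The `server_command` setter and `wait_for_server_command` above form a one-slot handoff over `threading.Event`: the setter stores the value and signals, the waiter blocks, then clears both. A self-contained sketch of the same pattern with generic names (not the webui API):

```python
import threading
from typing import Optional


class CommandChannel:
    def __init__(self):
        self._signal = threading.Event()
        self._value: Optional[str] = None

    def send(self, value: str) -> None:
        self._value = value
        self._signal.set()              # wake any thread blocked in wait()

    def wait(self, timeout: Optional[float] = None) -> Optional[str]:
        if self._signal.wait(timeout):
            self._signal.clear()        # consume the signal
            value, self._value = self._value, None
            return value
        return None                     # timed out


channel = CommandChannel()
threading.Thread(target=lambda: channel.send("restart"), daemon=True).start()
print(channel.wait(timeout=1.0))        # -> "restart"
```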
+ def request_restart(self) -> None:
+ self.interrupt()
+ self.server_command = "restart"
+ log.info("Received restart request")
+
+ def skip(self):
+ self.skipped = True
+ log.info("Received skip request")
+
+ def interrupt(self):
+ self.interrupted = True
+ log.info("Received interrupt request")
+
+ def nextjob(self):
+ if shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps == -1:
+ self.do_set_current_image()
+
+ self.job_no += 1
+ self.sampling_step = 0
+ self.current_image_sampling_step = 0
+
+ def dict(self):
+ obj = {
+ "skipped": self.skipped,
+ "interrupted": self.interrupted,
+ "job": self.job,
+ "job_count": self.job_count,
+ "job_timestamp": self.job_timestamp,
+ "job_no": self.job_no,
+ "sampling_step": self.sampling_step,
+ "sampling_steps": self.sampling_steps,
+ }
+
+ return obj
+
+ def begin(self, job: str = "(unknown)"):
+ self.sampling_step = 0
+ self.job_count = -1
+ self.processing_has_refined_job_count = False
+ self.job_no = 0
+ self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+ self.current_latent = None
+ self.current_image = None
+ self.current_image_sampling_step = 0
+ self.id_live_preview = 0
+ self.skipped = False
+ self.interrupted = False
+ self.textinfo = None
+ self.time_start = time.time()
+ self.job = job
+ devices.torch_gc()
+ log.info("Starting job %s", job)
+
+ def end(self):
+ duration = time.time() - self.time_start
+ log.info("Ending job %s (%.2f seconds)", self.job, duration)
+ self.job = ""
+ self.job_count = 0
+
+ devices.torch_gc()
+
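A rough sketch of the `begin`/`end` lifecycle as a caller would drive it; this assumes the `State` instance is exposed as `shared.state`, which this diff does not show, though the `TotalTQDM` code below reads `shared.state.job_count`:

```python
from modules import shared

shared.state.begin(job="txt2img")
try:
    for _ in range(20):
        shared.state.sampling_step += 1
        shared.state.set_current_image()  # throttled live-preview refresh, defined below
finally:
    shared.state.end()                    # logs duration, runs torch_gc()
```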
+ def set_current_image(self):
+ """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
+ if not shared.parallel_processing_allowed:
+ return
+
+ if self.sampling_step - self.current_image_sampling_step >= shared.opts.show_progress_every_n_steps and shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps != -1:
+ self.do_set_current_image()
+
+ def do_set_current_image(self):
+ if self.current_latent is None:
+ return
+
+ import modules.sd_samplers
+
+ try:
+ if shared.opts.show_progress_grid:
+ self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent))
+ else:
+ self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent))
+
+ self.current_image_sampling_step = self.sampling_step
+
+ except Exception:
+ # when switching models during generation, the VAE may be on CPU, so creating an image can fail;
+ # record the exception and carry on
+ errors.record_exception()
+
+ def assign_current_image(self, image):
+ self.current_image = image
+ self.id_live_preview += 1
diff --git a/modules/shared_total_tqdm.py b/modules/shared_total_tqdm.py new file mode 100644 index 00000000..cf82e104 --- /dev/null +++ b/modules/shared_total_tqdm.py @@ -0,0 +1,37 @@ +import tqdm
+
+from modules import shared
+
+
+class TotalTQDM:
+ def __init__(self):
+ self._tqdm = None
+
+ def reset(self):
+ self._tqdm = tqdm.tqdm(
+ desc="Total progress",
+ total=shared.state.job_count * shared.state.sampling_steps,
+ position=1,
+ file=shared.progress_print_out
+ )
+
+ def update(self):
+ if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars:
+ return
+ if self._tqdm is None:
+ self.reset()
+ self._tqdm.update()
+
+ def updateTotal(self, new_total):
+ if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars:
+ return
+ if self._tqdm is None:
+ self.reset()
+ self._tqdm.total = new_total
+
+ def clear(self):
+ if self._tqdm is not None:
+ self._tqdm.refresh()
+ self._tqdm.close()
+ self._tqdm = None
+
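A hedged usage sketch for `TotalTQDM`; it assumes the webui exposes an instance as `shared.total_tqdm` (not shown in this diff) and that `shared.state` carries the job/step counts the constructor reads:

```python
from modules import shared

shared.state.job_count = 2
shared.state.sampling_steps = 20

total_steps = shared.state.job_count * shared.state.sampling_steps
shared.total_tqdm.updateTotal(total_steps)   # (re)size the second-row bar
for _ in range(total_steps):
    shared.total_tqdm.update()               # one tick per sampling step
shared.total_tqdm.clear()                    # close it so later prints stay clean
```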
diff --git a/modules/sysinfo.py b/modules/sysinfo.py index cf24c6dd..7d906e1f 100644 --- a/modules/sysinfo.py +++ b/modules/sysinfo.py @@ -10,7 +10,7 @@ import psutil import re
import launch
-from modules import paths_internal, timer
+from modules import paths_internal, timer, shared, extensions, errors
checksum_token = "DontStealMyGamePlz__WINNERS_DONT_USE_DRUGS__DONT_COPY_THAT_FLOPPY"
environment_whitelist = {
@@ -115,8 +115,6 @@ def format_exception(e, tb): def get_exceptions():
try:
- from modules import errors
-
return list(reversed(errors.exception_records))
except Exception as e:
return str(e)
@@ -142,8 +140,6 @@ def get_torch_sysinfo(): def get_extensions(*, enabled):
try:
- from modules import extensions
-
def to_json(x: extensions.Extension):
return {
"name": x.name,
@@ -160,7 +156,6 @@ def get_extensions(*, enabled): def get_config():
try:
- from modules import shared
return shared.opts.data
except Exception as e:
return str(e)
diff --git a/modules/txt2img.py b/modules/txt2img.py index 29d94e8c..edad8930 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -1,7 +1,7 @@ from contextlib import closing
import modules.scripts
-from modules import sd_samplers, processing
+from modules import processing
from modules.generation_parameters_copypaste import create_override_settings_dict
from modules.shared import opts, cmd_opts
import modules.shared as shared
@@ -9,7 +9,7 @@ from modules.ui import plaintext_to_html import gradio as gr
-def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args):
+def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args):
override_settings = create_override_settings_dict(override_settings_texts)
p = processing.StableDiffusionProcessingTxt2Img(
@@ -25,7 +25,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step seed_resize_from_h=seed_resize_from_h,
seed_resize_from_w=seed_resize_from_w,
seed_enable_extras=seed_enable_extras,
- sampler_name=sd_samplers.samplers[sampler_index].name,
+ sampler_name=sampler_name,
batch_size=batch_size,
n_iter=n_iter,
steps=steps,
@@ -41,7 +41,8 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step hr_second_pass_steps=hr_second_pass_steps,
hr_resize_x=hr_resize_x,
hr_resize_y=hr_resize_y,
- hr_sampler_name=sd_samplers.samplers_for_img2img[hr_sampler_index - 1].name if hr_sampler_index != 0 else None,
+ hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name,
+ hr_sampler_name=None if hr_sampler_name == 'Use same sampler' else hr_sampler_name,
hr_prompt=hr_prompt,
hr_negative_prompt=hr_negative_prompt,
override_settings=override_settings,
diff --git a/modules/ui.py b/modules/ui.py index 586174b8..4e1daa8d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -13,7 +13,7 @@ from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
from modules import gradio_extensons # noqa: F401
-from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts
+from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, devices, ui_extra_networks
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path
from modules.ui_common import create_refresh_button
@@ -29,7 +29,6 @@ import modules.shared as shared import modules.images
from modules import prompt_parser
from modules.sd_hijack import model_hijack
-from modules.sd_samplers import samplers, samplers_for_img2img
from modules.generation_parameters_copypaste import image_from_url_text
create_setting_component = ui_settings.create_setting_component
@@ -41,6 +40,9 @@ warnings.filterwarnings("default" if opts.show_gradio_deprecation_warnings else mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
+# Likewise, add explicit content-type header for certain missing image types
+mimetypes.add_type('image/webp', '.webp')
+
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
gradio.utils.version_check = lambda: None
@@ -76,7 +78,6 @@ extra_networks_symbol = '\U0001F3B4' # 🎴 switch_values_symbol = '\U000021C5' # ⇅
restore_progress_symbol = '\U0001F300' # 🌀
detect_image_size_symbol = '\U0001F4D0' # 📐
-up_down_symbol = '\u2195\ufe0f' # ↕️
plaintext_to_html = ui_common.plaintext_to_html
@@ -89,8 +90,6 @@ def send_gradio_gallery_to_image(x): def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y):
- from modules import processing, devices
-
if not enable:
return ""
@@ -261,7 +260,6 @@ class Toprow: with gr.Row():
self.negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)", elem_classes=["prompt"])
-
self.button_interrogate = None
self.button_deepbooru = None
if is_img2img:
@@ -290,7 +288,6 @@ class Toprow: with gr.Row(elem_id=f"{id_part}_tools"):
self.paste = ToolButton(value=paste_symbol, elem_id="paste")
self.clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt")
- self.extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks")
self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False)
self.token_counter = gr.HTML(value="<span>0/75</span>", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"])
@@ -359,14 +356,14 @@ def create_output_panel(tabname, outdir): def create_sampler_and_steps_selection(choices, tabname):
if opts.samplers_in_dropdown:
with FormRow(elem_id=f"sampler_selection_{tabname}"):
- sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
+ sampler_name = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0])
steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
else:
with FormGroup(elem_id=f"sampler_selection_{tabname}"):
steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
- sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
+ sampler_name = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=choices, value=choices[0])
- return steps, sampler_index
+ return steps, sampler_name
def ordered_ui_categories():
@@ -404,17 +401,16 @@ def create_ui(): dummy_component = gr.Label(visible=False)
- with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks:
- from modules import ui_extra_networks
- extra_networks_ui = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'txt2img')
+ extra_tabs = gr.Tabs(elem_id="txt2img_extra_tabs")
+ extra_tabs.__enter__()
- with gr.Row(equal_height=False):
+ with gr.Tab("Generation", id="txt2img_generation") as txt2img_generation_tab, gr.Row(equal_height=False):
with gr.Column(variant='compact', elem_id="txt2img_settings"):
scripts.scripts_txt2img.prepare_ui()
for category in ordered_ui_categories():
if category == "sampler":
- steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img")
+ steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "txt2img")
elif category == "dimensions":
with FormRow():
@@ -456,7 +452,11 @@ def create_ui(): hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
with FormRow(elem_id="txt2img_hires_fix_row3", variant="compact", visible=opts.hires_fix_show_sampler) as hr_sampler_container:
- hr_sampler_index = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + [x.name for x in samplers_for_img2img], value="Use same sampler", type="index")
+
+ hr_checkpoint_name = gr.Dropdown(label='Hires checkpoint', elem_id="hr_checkpoint", choices=["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True), value="Use same checkpoint")
+ create_refresh_button(hr_checkpoint_name, modules.sd_models.list_models, lambda: {"choices": ["Use same checkpoint"] + modules.sd_models.checkpoint_tiles(use_short=True)}, "hr_checkpoint_refresh")
+
+ hr_sampler_name = gr.Dropdown(label='Hires sampling method', elem_id="hr_sampler", choices=["Use same sampler"] + sd_samplers.visible_sampler_names(), value="Use same sampler")
with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container:
with gr.Column(scale=80):
@@ -516,7 +516,7 @@ def create_ui(): toprow.negative_prompt,
toprow.ui_styles.dropdown,
steps,
- sampler_index,
+ sampler_name,
restore_faces,
tiling,
batch_count,
@@ -533,7 +533,8 @@ def create_ui(): hr_second_pass_steps,
hr_resize_x,
hr_resize_y,
- hr_sampler_index,
+ hr_checkpoint_name,
+ hr_sampler_name,
hr_prompt,
hr_negative_prompt,
override_settings,
@@ -578,7 +579,7 @@ def create_ui(): (toprow.prompt, "Prompt"),
(toprow.negative_prompt, "Negative prompt"),
(steps, "Steps"),
- (sampler_index, "Sampler"),
+ (sampler_name, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(seed, "Seed"),
@@ -599,8 +600,9 @@ def create_ui(): (hr_second_pass_steps, "Hires steps"),
(hr_resize_x, "Hires resize-1"),
(hr_resize_y, "Hires resize-2"),
- (hr_sampler_index, "Hires sampler"),
- (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" else gr.update()),
+ (hr_checkpoint_name, "Hires checkpoint"),
+ (hr_sampler_name, "Hires sampler"),
+ (hr_sampler_container, lambda d: gr.update(visible=True) if d.get("Hires sampler", "Use same sampler") != "Use same sampler" or d.get("Hires checkpoint", "Use same checkpoint") != "Use same checkpoint" else gr.update()),
(hr_prompt, "Hires prompt"),
(hr_negative_prompt, "Hires negative prompt"),
(hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()),
@@ -615,7 +617,7 @@ def create_ui(): toprow.prompt,
toprow.negative_prompt,
steps,
- sampler_index,
+ sampler_name,
cfg_scale,
seed,
width,
@@ -625,7 +627,10 @@ def create_ui(): toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps], outputs=[toprow.token_counter])
toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter])
- ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery)
+ extra_networks_ui = ui_extra_networks.create_ui(txt2img_interface, [txt2img_generation_tab], 'txt2img')
+ ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery)
+
+ extra_tabs.__exit__()
scripts.scripts_current = scripts.scripts_img2img
scripts.scripts_img2img.initialize_scripts(is_img2img=True)
@@ -633,11 +638,10 @@ def create_ui(): with gr.Blocks(analytics_enabled=False) as img2img_interface:
toprow = Toprow(is_img2img=True)
- with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks:
- from modules import ui_extra_networks
- extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, toprow.extra_networks_button, 'img2img')
+ extra_tabs = gr.Tabs(elem_id="img2img_extra_tabs")
+ extra_tabs.__enter__()
- with FormRow(equal_height=False):
+ with gr.Tab("Generation", id="img2img_generation") as img2img_generation_tab, FormRow().style(equal_height=False):
with gr.Column(variant='compact', elem_id="img2img_settings"):
copy_image_buttons = []
copy_image_destinations = {}
@@ -663,15 +667,15 @@ def create_ui(): add_copy_image_controls('img2img', init_img)
with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
- sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height)
+ sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_sketch_default_brush_color)
add_copy_image_controls('sketch', sketch)
with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
- init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height)
+ init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_mask_brush_color)
add_copy_image_controls('inpaint', init_img_with_mask)
with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
- inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height)
+ inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA", height=opts.img2img_editor_height, brush_color=opts.img2img_inpaint_sketch_default_brush_color)
inpaint_color_sketch_orig = gr.State(None)
add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
@@ -735,7 +739,7 @@ def create_ui(): for category in ordered_ui_categories():
if category == "sampler":
- steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img")
+ steps, sampler_name = create_sampler_and_steps_selection(sd_samplers.visible_sampler_names(), "img2img")
elif category == "dimensions":
with FormRow():
@@ -867,7 +871,7 @@ def create_ui(): init_img_inpaint,
init_mask_inpaint,
steps,
- sampler_index,
+ sampler_name,
mask_blur,
mask_alpha,
inpainting_fill,
@@ -959,13 +963,11 @@ def create_ui(): toprow.token_button.click(fn=update_token_counter, inputs=[toprow.prompt, steps], outputs=[toprow.token_counter])
toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter])
- ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery)
-
img2img_paste_fields = [
(toprow.prompt, "Prompt"),
(toprow.negative_prompt, "Negative prompt"),
(steps, "Steps"),
- (sampler_index, "Sampler"),
+ (sampler_name, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
(image_cfg_scale, "Image CFG scale"),
@@ -989,6 +991,11 @@ def create_ui(): paste_button=toprow.paste, tabname="img2img", source_text_component=toprow.prompt, source_image_component=None,
))
+ extra_networks_ui_img2img = ui_extra_networks.create_ui(img2img_interface, [img2img_generation_tab], 'img2img')
+ ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery)
+
+ extra_tabs.__exit__()
+
scripts.scripts_current = None
with gr.Blocks(analytics_enabled=False) as extras_interface:
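The sampler plumbing above now moves name strings end to end instead of indices; a reduced sketch of the new contract inside `modules/ui.py`, reusing the functions this diff touches (`visible_sampler_names` returning plain name strings is inferred from its use here):

```python
# before: the dropdown held an index and the backend resolved it
#     sampler_name = sd_samplers.samplers[sampler_index].name
# after: the dropdown/radio holds the sampler name itself
steps, sampler_name = create_sampler_and_steps_selection(
    sd_samplers.visible_sampler_names(),   # e.g. ["Euler a", "DPM++ 2M Karras", ...]
    "txt2img",
)
# sampler_name is a gr.Dropdown/gr.Radio whose value is forwarded verbatim
# into processing as sampler_name=...
```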
diff --git a/modules/ui_common.py b/modules/ui_common.py index eefe0c0e..99d19ff0 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -11,7 +11,7 @@ from modules import call_queue, shared from modules.generation_parameters_copypaste import image_from_url_text
import modules.images
from modules.ui_components import ToolButton
-
+import modules.generation_parameters_copypaste as parameters_copypaste
folder_symbol = '\U0001f4c2' # 📂
refresh_symbol = '\U0001f504' # 🔄
@@ -105,8 +105,6 @@ def save_files(js_data, images, do_make_zip, index): def create_output_panel(tabname, outdir):
- from modules import shared
- import modules.generation_parameters_copypaste as parameters_copypaste
def open_folder(f):
if not os.path.exists(f):
@@ -239,13 +237,13 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele for comp in refresh_components:
setattr(comp, k, v)
- return [gr.update(**(args or {})) for _ in refresh_components]
+ return [gr.update(**(args or {})) for _ in refresh_components] if len(refresh_components) > 1 else gr.update(**(args or {}))
refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id, tooltip=f"{label}: refresh" if label else "Refresh")
refresh_button.click(
fn=refresh,
inputs=[],
- outputs=[*refresh_components]
+ outputs=refresh_components
)
return refresh_button
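With the change above, `create_refresh_button` returns a single `gr.update(...)` when only one component is refreshed, matching Gradio's convention that a handler with one output returns a bare update rather than a one-element list. Usage stays as in the `hr_checkpoint_name` hunk earlier in this diff:

```python
create_refresh_button(
    hr_checkpoint_name,                    # component (or list of components) to refresh
    modules.sd_models.list_models,         # callable that reloads the underlying data
    lambda: {"choices": ["Use same checkpoint"]
             + modules.sd_models.checkpoint_tiles(use_short=True)},
    "hr_checkpoint_refresh",               # elem_id for the refresh ToolButton
)
```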
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index f2752f10..063bd7b8 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -2,9 +2,8 @@ import os.path import urllib.parse
from pathlib import Path
-from modules import shared, ui_extra_networks_user_metadata, errors
+from modules import shared, ui_extra_networks_user_metadata, errors, extra_networks
from modules.images import read_info_from_image, save_image_with_geninfo
-from modules.ui import up_down_symbol
import gradio as gr
import json
import html
@@ -101,16 +100,7 @@ class ExtraNetworksPage: def read_user_metadata(self, item):
filename = item.get("filename", None)
- basename, ext = os.path.splitext(filename)
- metadata_filename = basename + '.json'
-
- metadata = {}
- try:
- if os.path.isfile(metadata_filename):
- with open(metadata_filename, "r", encoding="utf8") as file:
- metadata = json.load(file)
- except Exception as e:
- errors.display(e, f"reading extra network user metadata from {metadata_filename}")
+ metadata = extra_networks.get_user_metadata(filename)
desc = metadata.get("description", None)
if desc is not None:
@@ -164,7 +154,7 @@ class ExtraNetworksPage: subdirs = {"": 1, **subdirs}
subdirs_html = "".join([f"""
-<button class='lg secondary gradio-button custom-button{" search-all" if subdir=="" else ""}' onclick='extraNetworksSearchButton("{tabname}_extra_tabs", event)'>
+<button class='lg secondary gradio-button custom-button{" search-all" if subdir=="" else ""}' onclick='extraNetworksSearchButton("{tabname}_extra_search", event)'>
{html.escape(subdir if subdir!="" else "all")}
</button>
""" for subdir in subdirs])
@@ -356,7 +346,9 @@ def pages_in_preferred_order(pages): return sorted(pages, key=lambda x: tab_scores[x.name])
-def create_ui(container, button, tabname):
+def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
+ from modules.ui import switch_values_symbol
+
ui = ExtraNetworksUi()
ui.pages = []
ui.pages_contents = []
@@ -364,48 +356,42 @@ def create_ui(container, button, tabname):
ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy())
ui.tabname = tabname
- with gr.Tabs(elem_id=tabname+"_extra_tabs"):
- for page in ui.stored_extra_pages:
- with gr.Tab(page.title, id=page.id_page):
- elem_id = f"{tabname}_{page.id_page}_cards_html"
- page_elem = gr.HTML('Loading...', elem_id=elem_id)
- ui.pages.append(page_elem)
+ related_tabs = []
+
+ for page in ui.stored_extra_pages:
+ with gr.Tab(page.title, id=page.id_page) as tab:
+ elem_id = f"{tabname}_{page.id_page}_cards_html"
+ page_elem = gr.HTML('Loading...', elem_id=elem_id)
+ ui.pages.append(page_elem)
- page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + quote_js(tabname) + '); return []}', inputs=[], outputs=[])
+ page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + quote_js(tabname) + '); return []}', inputs=[], outputs=[])
- editor = page.create_user_metadata_editor(ui, tabname)
- editor.create_ui()
- ui.user_metadata_editors.append(editor)
+ editor = page.create_user_metadata_editor(ui, tabname)
+ editor.create_ui()
+ ui.user_metadata_editors.append(editor)
- gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False)
- gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", multiselect=False, visible=False, show_label=False, interactive=True)
- ToolButton(up_down_symbol, elem_id=tabname+"_extra_sortorder")
- button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh")
+ related_tabs.append(tab)
+
+ edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True)
+ dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order")
+ button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False)
+ button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False)
+ checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False)
ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False)
ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False)
- def toggle_visibility(is_visible):
- is_visible = not is_visible
-
- return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary"))
+ for tab in unrelated_tabs:
+ tab.select(fn=lambda: [gr.update(visible=False) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False)
- def fill_tabs(is_empty):
- """Creates HTML for extra networks' tabs when the extra networks button is clicked for the first time."""
+ for tab in related_tabs:
+ tab.select(fn=lambda: [gr.update(visible=True) for _ in range(5)], inputs=[], outputs=[edit_search, dropdown_sort, button_sortorder, button_refresh, checkbox_show_dirs], show_progress=False)
+ def pages_html():
if not ui.pages_contents:
- refresh()
+ return refresh()
- if is_empty:
- return True, *ui.pages_contents
-
- return True, *[gr.update() for _ in ui.pages_contents]
-
- state_visible = gr.State(value=False)
- button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button], show_progress=False)
-
- state_empty = gr.State(value=True)
- button.click(fn=fill_tabs, inputs=[state_empty], outputs=[state_empty, *ui.pages], show_progress=False)
+ return ui.pages_contents
def refresh():
for pg in ui.stored_extra_pages:
@@ -415,6 +401,7 @@ def create_ui(container, button, tabname):
return ui.pages_contents
+ interface.load(fn=pages_html, inputs=[], outputs=[*ui.pages])
button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages)
return ui
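The removed `toggle_visibility`/`fill_tabs` state machinery is replaced by two mechanisms visible above: `tab.select` handlers that show or hide the shared controls, and an `interface.load` hook that builds the card HTML once per browser page load. A reduced sketch of the lazy-fill half — the markup string is a placeholder:

```python
# Reduced sketch of the pages_html()/refresh() split: Blocks.load fires
# on each browser page load, building the HTML on first visit and
# reusing the cache afterwards. The markup string is a placeholder.
import gradio as gr

pages_contents = []

def refresh():
    pages_contents.clear()
    pages_contents.append("<div>rebuilt card markup</div>")
    return pages_contents[0]

def pages_html():
    if not pages_contents:           # first load: build and cache
        return refresh()
    return pages_contents[0]

with gr.Blocks() as demo:
    cards = gr.HTML("Loading...")
    button_refresh = gr.Button("Refresh")
    demo.load(fn=pages_html, inputs=[], outputs=[cards])
    button_refresh.click(fn=refresh, inputs=[], outputs=[cards])
```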
diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py
index 891d8f2c..77885022 100644
--- a/modules/ui_extra_networks_checkpoints.py
+++ b/modules/ui_extra_networks_checkpoints.py
@@ -3,6 +3,7 @@ import os
from modules import shared, ui_extra_networks, sd_models
from modules.ui_extra_networks import quote_js
+from modules.ui_extra_networks_checkpoints_user_metadata import CheckpointUserMetadataEditor
class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
@@ -34,3 +35,5 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
def allowed_directories_for_previews(self):
return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None]
+ def create_user_metadata_editor(self, ui, tabname):
+ return CheckpointUserMetadataEditor(ui, tabname, self)
diff --git a/modules/ui_extra_networks_checkpoints_user_metadata.py b/modules/ui_extra_networks_checkpoints_user_metadata.py
new file mode 100644
index 00000000..25df0a80
--- /dev/null
+++ b/modules/ui_extra_networks_checkpoints_user_metadata.py
@@ -0,0 +1,66 @@
+import gradio as gr
+
+from modules import ui_extra_networks_user_metadata, sd_vae, shared
+from modules.ui_common import create_refresh_button
+
+
+class CheckpointUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor):
+ def __init__(self, ui, tabname, page):
+ super().__init__(ui, tabname, page)
+
+ self.select_vae = None
+
+ def save_user_metadata(self, name, desc, notes, vae):
+ user_metadata = self.get_user_metadata(name)
+ user_metadata["description"] = desc
+ user_metadata["notes"] = notes
+ user_metadata["vae"] = vae
+
+ self.write_user_metadata(name, user_metadata)
+
+ def update_vae(self, name):
+ if name == shared.sd_model.sd_checkpoint_info.name_for_extra:
+ sd_vae.reload_vae_weights()
+
+ def put_values_into_components(self, name):
+ user_metadata = self.get_user_metadata(name)
+ values = super().put_values_into_components(name)
+
+ return [
+ *values[0:5],
+ user_metadata.get('vae', ''),
+ ]
+
+ def create_editor(self):
+ self.create_default_editor_elems()
+
+ with gr.Row():
+ self.select_vae = gr.Dropdown(choices=["Automatic", "None"] + list(sd_vae.vae_dict), value="None", label="Preferred VAE", elem_id="checkpoint_edit_user_metadata_preferred_vae")
+ create_refresh_button(self.select_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["Automatic", "None"] + list(sd_vae.vae_dict)}, "checkpoint_edit_user_metadata_refresh_preferred_vae")
+
+ self.edit_notes = gr.TextArea(label='Notes', lines=4)
+
+ self.create_default_buttons()
+
+ viewed_components = [
+ self.edit_name,
+ self.edit_description,
+ self.html_filedata,
+ self.html_preview,
+ self.edit_notes,
+ self.select_vae,
+ ]
+
+ self.button_edit\
+ .click(fn=self.put_values_into_components, inputs=[self.edit_name_input], outputs=viewed_components)\
+ .then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[self.box])
+
+ edited_components = [
+ self.edit_description,
+ self.edit_notes,
+ self.select_vae,
+ ]
+
+ self.setup_save_handler(self.button_save, self.save_user_metadata, edited_components)
+ self.button_save.click(fn=self.update_vae, inputs=[self.edit_name_input])
+
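The base class persists this metadata as a `.json` sidecar next to the model file (`write_user_metadata` in `ui_extra_networks_user_metadata.py`, not shown in this hunk); how the stored `vae` key is later resolved to an actual VAE lives in `sd_vae` and is also outside this diff. A hedged illustration of reading such a sidecar back:

```python
# Illustration only: user metadata lives in a .json file next to the
# checkpoint, and the key written by save_user_metadata() above is
# "vae". The webui's own resolver is in sd_vae and differs in detail.
import json
import os

def read_preferred_vae(checkpoint_path):
    sidecar = os.path.splitext(checkpoint_path)[0] + ".json"
    if not os.path.isfile(sidecar):
        return ""
    with open(sidecar, encoding="utf8") as file:
        return json.load(file).get("vae", "")
```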
diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py
index 1cb9eb6f..a5423fd8 100644
--- a/modules/ui_extra_networks_user_metadata.py
+++ b/modules/ui_extra_networks_user_metadata.py
@@ -36,8 +36,8 @@ class UserMetadataEditor:
item = self.page.items.get(name, {})
user_metadata = item.get('user_metadata', None)
- if user_metadata is None:
- user_metadata = {}
+ if not user_metadata:
+ user_metadata = {'description': item.get('description', '')}
item['user_metadata'] = user_metadata
return user_metadata
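The switch from an `is None` test to a falsiness test matters because an empty sidecar parses to `{}`; a short illustration of the behavior the new check gives:

```python
# Why `if not user_metadata` replaced `if user_metadata is None`:
# an item whose sidecar parsed to {} previously kept the empty dict,
# losing any built-in description; now None and {} both get the default.
item = {"description": "built-in description", "user_metadata": {}}

user_metadata = item.get("user_metadata", None)
if not user_metadata:                      # catches None *and* {}
    user_metadata = {"description": item.get("description", "")}
item["user_metadata"] = user_metadata

assert item["user_metadata"]["description"] == "built-in description"
```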
diff --git a/modules/ui_settings.py b/modules/ui_settings.py
index a6076bf3..6dde4b6a 100644
--- a/modules/ui_settings.py
+++ b/modules/ui_settings.py
@@ -158,7 +158,7 @@ class UiSettings:
loadsave.create_ui()
with gr.TabItem("Sysinfo", id="sysinfo", elem_id="settings_tab_sysinfo"):
- gr.HTML('<a href="./internal/sysinfo-download" class="sysinfo_big_link" download>Download system info</a><br /><a href="./internal/sysinfo">(or open as text in a new page)</a>', elem_id="sysinfo_download")
+ gr.HTML('<a href="./internal/sysinfo-download" class="sysinfo_big_link" download>Download system info</a><br /><a href="./internal/sysinfo" target="_blank">(or open as text in a new page)</a>', elem_id="sysinfo_download")
with gr.Row():
with gr.Column(scale=1):
diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py
index fb75137e..506017e5 100644
--- a/modules/ui_tempdir.py
+++ b/modules/ui_tempdir.py
@@ -57,8 +57,9 @@ def save_pil_to_file(self, pil_image, dir=None, format="png"):
return file_obj.name
-# override save to file function so that it also writes PNG info
-gradio.components.IOComponent.pil_to_temp_file = save_pil_to_file
+def install_ui_tempdir_override():
+ """override save to file function so that it also writes PNG info"""
+ gradio.components.IOComponent.pil_to_temp_file = save_pil_to_file
def on_tmpdir_changed():
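Wrapping the assignment in `install_ui_tempdir_override()` removes an import-time side effect: merely importing `ui_tempdir` no longer patches gradio, and the entry point opts in explicitly. The same pattern in miniature, with stand-in names:

```python
# The pattern in miniature, with stand-in names: the replacement method
# is defined at module scope but only applied when explicitly installed.
class Component:                    # stand-in for gradio's IOComponent
    def pil_to_temp_file(self):
        return "plain temp file"

def save_with_geninfo(self):        # stand-in for save_pil_to_file
    return "temp file plus PNG info"

def install_override():
    Component.pil_to_temp_file = save_with_geninfo

install_override()                  # the caller decides when to patch
assert Component().pil_to_temp_file() == "temp file plus PNG info"
```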
diff --git a/modules/util.py b/modules/util.py
new file mode 100644
index 00000000..60afc067
--- /dev/null
+++ b/modules/util.py
@@ -0,0 +1,58 @@
+import os
+import re
+
+from modules import shared
+from modules.paths_internal import script_path
+
+
+def natural_sort_key(s, regex=re.compile('([0-9]+)')):
+ return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)]
+
+
+def listfiles(dirname):
+ filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")]
+ return [file for file in filenames if os.path.isfile(file)]
+
+
+def html_path(filename):
+ return os.path.join(script_path, "html", filename)
+
+
+def html(filename):
+ path = html_path(filename)
+
+ if os.path.exists(path):
+ with open(path, encoding="utf8") as file:
+ return file.read()
+
+ return ""
+
+
+def walk_files(path, allowed_extensions=None):
+ if not os.path.exists(path):
+ return
+
+ if allowed_extensions is not None:
+ allowed_extensions = set(allowed_extensions)
+
+ items = list(os.walk(path, followlinks=True))
+ items = sorted(items, key=lambda x: natural_sort_key(x[0]))
+
+ for root, _, files in items:
+ for filename in sorted(files, key=natural_sort_key):
+ if allowed_extensions is not None:
+ _, ext = os.path.splitext(filename)
+ if ext not in allowed_extensions:
+ continue
+
+ if not shared.opts.list_hidden_files and ("/." in root or "\\." in root):
+ continue
+
+ yield os.path.join(root, filename)
+
+
+def ldm_print(*args, **kwargs):
+ if shared.opts.hide_ldm_prints:
+ return
+
+ print(*args, **kwargs)
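A standalone copy of `natural_sort_key` with a usage example showing the numeric-aware ordering that `listfiles` and `walk_files` above both rely on:

```python
# Standalone copy of natural_sort_key from the new modules/util.py,
# plus a usage example contrasting plain and numeric-aware ordering.
import re

def natural_sort_key(s, regex=re.compile('([0-9]+)')):
    return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)]

names = ["img10.png", "img2.png", "IMG1.png"]
print(sorted(names))                        # ['IMG1.png', 'img10.png', 'img2.png']
print(sorted(names, key=natural_sort_key))  # ['IMG1.png', 'img2.png', 'img10.png']
```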
diff --git a/requirements.txt b/requirements.txt
index 9a47d6d0..d83092f0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,6 +6,7 @@ basicsr
blendmodes
clean-fid
einops
+fastapi>=0.90.1
gfpgan
gradio==3.39.0
inflection
diff --git a/style.css b/style.css
--- a/style.css
+++ b/style.css
@@ -140,6 +140,10 @@ div.styler{
background: var(--background-fill-primary);
}
+.block.gradio-textbox{
+ overflow: visible !important;
+}
+
/* general styled components */
@@ -497,6 +501,13 @@ table.popup-table .link{
font-size: 18pt;
}
+#settings .settings-info{
+ max-width: 48em;
+ border: 1px dotted #777;
+ margin: 0;
+ padding: 1em;
+}
+
/* live preview */
.progressDiv{
@@ -782,9 +793,14 @@ footer {
/* extra networks UI */
.extra-network-cards{
- height: 725px;
- overflow: scroll;
+ height: calc(100vh - 24rem);
+ overflow: clip scroll;
resize: vertical;
+ min-height: 52rem;
+}
+
+.extra-networks > div.tab-nav{
+ min-height: 3.4rem;
}
.extra-networks > div > [id *= '_extra_']{
@@ -799,10 +815,12 @@ footer {
margin: 0 0.15em;
}
.extra-networks .tab-nav .search,
-.extra-networks .tab-nav .sort{
- display: inline-block;
+.extra-networks .tab-nav .sort,
+.extra-networks .tab-nav .show-dirs
+{
margin: 0.3em;
align-self: center;
+ width: auto;
}
.extra-networks .tab-nav .search {
diff --git a/webui.py b/webui.py
--- a/webui.py
+++ b/webui.py
@@ -1,347 +1,41 @@
from __future__ import annotations
import os
-import sys
import time
-import importlib
-import signal
-import re
-import warnings
-import json
-from threading import Thread
-from typing import Iterable
-
-from fastapi import FastAPI
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.middleware.gzip import GZipMiddleware
-
-import logging
-
-# We can't use cmd_opts for this because it will not have been initialized at this point.
-log_level = os.environ.get("SD_WEBUI_LOG_LEVEL")
-if log_level:
- log_level = getattr(logging, log_level.upper(), None) or logging.INFO
- logging.basicConfig(
- level=log_level,
- format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
- datefmt='%Y-%m-%d %H:%M:%S',
- )
-
-logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh...
-logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
from modules import timer
+from modules import initialize_util
+from modules import initialize
+
startup_timer = timer.startup_timer
startup_timer.record("launcher")
-import torch
-import pytorch_lightning # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
-warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
-warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")
-startup_timer.record("import torch")
-
-import gradio # noqa: F401
-startup_timer.record("import gradio")
-
-from modules import paths, timer, import_hook, errors, devices # noqa: F401
-startup_timer.record("setup paths")
-
-import ldm.modules.encoders.modules # noqa: F401
-startup_timer.record("import ldm")
-
-
-from modules import extra_networks
-from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock # noqa: F401
-
-# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
-if ".dev" in torch.__version__ or "+git" in torch.__version__:
- torch.__long_version__ = torch.__version__
- torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
-
-from modules import shared
-
-if not shared.cmd_opts.skip_version_check:
- errors.check_versions()
-
-import modules.codeformer_model as codeformer
-import modules.gfpgan_model as gfpgan
-from modules import sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states
-import modules.face_restoration
-import modules.img2img
-
-import modules.lowvram
-import modules.scripts
-import modules.sd_hijack
-import modules.sd_hijack_optimizations
-import modules.sd_models
-import modules.sd_vae
-import modules.sd_unet
-import modules.txt2img
-import modules.script_callbacks
-import modules.textual_inversion.textual_inversion
-import modules.progress
-
-import modules.ui
-from modules import modelloader
-from modules.shared import cmd_opts
-import modules.hypernetworks.hypernetwork
-
-startup_timer.record("other imports")
-
-
-if cmd_opts.server_name:
- server_name = cmd_opts.server_name
-else:
- server_name = "0.0.0.0" if cmd_opts.listen else None
-
-
-def fix_asyncio_event_loop_policy():
- """
- The default `asyncio` event loop policy only automatically creates
- event loops in the main threads. Other threads must create event
- loops explicitly or `asyncio.get_event_loop` (and therefore
- `.IOLoop.current`) will fail. Installing this policy allows event
- loops to be created automatically on any thread, matching the
- behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
- """
-
- import asyncio
-
- if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
- # "Any thread" and "selector" should be orthogonal, but there's not a clean
- # interface for composing policies so pick the right base.
- _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore
- else:
- _BasePolicy = asyncio.DefaultEventLoopPolicy
-
- class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore
- """Event loop policy that allows loop creation on any thread.
- Usage::
-
- asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
- """
-
- def get_event_loop(self) -> asyncio.AbstractEventLoop:
- try:
- return super().get_event_loop()
- except (RuntimeError, AssertionError):
- # This was an AssertionError in python 3.4.2 (which ships with debian jessie)
- # and changed to a RuntimeError in 3.4.3.
- # "There is no current event loop in thread %r"
- loop = self.new_event_loop()
- self.set_event_loop(loop)
- return loop
-
- asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
-
-
-def restore_config_state_file():
- config_state_file = shared.opts.restore_config_state_file
- if config_state_file == "":
- return
-
- shared.opts.restore_config_state_file = ""
- shared.opts.save(shared.config_filename)
-
- if os.path.isfile(config_state_file):
- print(f"*** About to restore extension state from file: {config_state_file}")
- with open(config_state_file, "r", encoding="utf-8") as f:
- config_state = json.load(f)
- config_states.restore_extension_config(config_state)
- startup_timer.record("restore extension config")
- elif config_state_file:
- print(f"!!! Config state backup not found: {config_state_file}")
-
-
-def validate_tls_options():
- if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile):
- return
-
- try:
- if not os.path.exists(cmd_opts.tls_keyfile):
- print("Invalid path to TLS keyfile given")
- if not os.path.exists(cmd_opts.tls_certfile):
- print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
- except TypeError:
- cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
- print("TLS setup invalid, running webui without TLS")
- else:
- print("Running with TLS")
- startup_timer.record("TLS")
-
-
-def get_gradio_auth_creds() -> Iterable[tuple[str, ...]]:
- """
- Convert the gradio_auth and gradio_auth_path commandline arguments into
- an iterable of (username, password) tuples.
- """
- def process_credential_line(s) -> tuple[str, ...] | None:
- s = s.strip()
- if not s:
- return None
- return tuple(s.split(':', 1))
-
- if cmd_opts.gradio_auth:
- for cred in cmd_opts.gradio_auth.split(','):
- cred = process_credential_line(cred)
- if cred:
- yield cred
-
- if cmd_opts.gradio_auth_path:
- with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file:
- for line in file.readlines():
- for cred in line.strip().split(','):
- cred = process_credential_line(cred)
- if cred:
- yield cred
-
-
-def configure_sigint_handler():
- # make the program just exit at ctrl+c without waiting for anything
- def sigint_handler(sig, frame):
- print(f'Interrupted with signal {sig} in {frame}')
- os._exit(0)
-
- if not os.environ.get("COVERAGE_RUN"):
- # Don't install the immediate-quit handler when running under coverage,
- # as then the coverage report won't be generated.
- signal.signal(signal.SIGINT, sigint_handler)
-
-
-def configure_opts_onchange():
- shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()), call=False)
- shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
- shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
- shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
- shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
- shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: modules.sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
- startup_timer.record("opts onchange")
-
-
-def initialize():
- fix_asyncio_event_loop_policy()
- validate_tls_options()
- configure_sigint_handler()
- modelloader.cleanup_models()
- configure_opts_onchange()
-
- modules.sd_models.setup_model()
- startup_timer.record("setup SD model")
-
- codeformer.setup_model(cmd_opts.codeformer_models_path)
- startup_timer.record("setup codeformer")
+initialize.imports()
- gfpgan.setup_model(cmd_opts.gfpgan_models_path)
- startup_timer.record("setup gfpgan")
-
- initialize_rest(reload_script_modules=False)
-
-
-def initialize_rest(*, reload_script_modules=False):
- """
- Called both from initialize() and when reloading the webui.
- """
- sd_samplers.set_samplers()
- extensions.list_extensions()
- startup_timer.record("list extensions")
-
- restore_config_state_file()
-
- if cmd_opts.ui_debug_mode:
- shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
- modules.scripts.load_scripts()
- return
-
- modules.sd_models.list_models()
- startup_timer.record("list SD models")
-
- localization.list_localizations(cmd_opts.localizations_dir)
-
- with startup_timer.subcategory("load scripts"):
- modules.scripts.load_scripts()
-
- if reload_script_modules:
- for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
- importlib.reload(module)
- startup_timer.record("reload script modules")
-
- modelloader.load_upscalers()
- startup_timer.record("load upscalers")
-
- modules.sd_vae.refresh_vae_list()
- startup_timer.record("refresh VAE")
- modules.textual_inversion.textual_inversion.list_textual_inversion_templates()
- startup_timer.record("refresh textual inversion templates")
-
- modules.script_callbacks.on_list_optimizers(modules.sd_hijack_optimizations.list_optimizers)
- modules.sd_hijack.list_optimizers()
- startup_timer.record("scripts list_optimizers")
-
- modules.sd_unet.list_unets()
- startup_timer.record("scripts list_unets")
-
- def load_model():
- """
- Accesses shared.sd_model property to load model.
- After it's available, if it has been loaded before this access by some extension,
- its optimization may be None because the list of optimizaers has neet been filled
- by that time, so we apply optimization again.
- """
-
- shared.sd_model # noqa: B018
-
- if modules.sd_hijack.current_optimizer is None:
- modules.sd_hijack.apply_optimizations()
-
- devices.first_time_calculation()
-
- Thread(target=load_model).start()
-
- shared.reload_hypernetworks()
- startup_timer.record("reload hypernetworks")
-
- ui_extra_networks.initialize()
- ui_extra_networks.register_default_pages()
-
- extra_networks.initialize()
- extra_networks.register_default_extra_networks()
- startup_timer.record("initialize extra networks")
-
-
-def setup_middleware(app):
- app.middleware_stack = None # reset current middleware to allow modifying user provided list
- app.add_middleware(GZipMiddleware, minimum_size=1000)
- configure_cors_middleware(app)
- app.build_middleware_stack() # rebuild middleware stack on-the-fly
-
-
-def configure_cors_middleware(app):
- cors_options = {
- "allow_methods": ["*"],
- "allow_headers": ["*"],
- "allow_credentials": True,
- }
- if cmd_opts.cors_allow_origins:
- cors_options["allow_origins"] = cmd_opts.cors_allow_origins.split(',')
- if cmd_opts.cors_allow_origins_regex:
- cors_options["allow_origin_regex"] = cmd_opts.cors_allow_origins_regex
- app.add_middleware(CORSMiddleware, **cors_options)
+initialize.check_versions()
def create_api(app):
from modules.api.api import Api
+ from modules.call_queue import queue_lock
+
api = Api(app, queue_lock)
return api
def api_only():
- initialize()
+ from fastapi import FastAPI
+ from modules.shared_cmd_options import cmd_opts
+
+ initialize.initialize()
app = FastAPI()
- setup_middleware(app)
+ initialize_util.setup_middleware(app)
api = create_api(app)
- modules.script_callbacks.app_started_callback(None, app)
+ from modules import script_callbacks
+ script_callbacks.before_ui_callback()
+ script_callbacks.app_started_callback(None, app)
print(f"Startup time: {startup_timer.summary()}.")
api.launch(
@@ -352,35 +46,46 @@ def api_only():
def webui():
+ from modules.shared_cmd_options import cmd_opts
+
launch_api = cmd_opts.api
- initialize()
+ initialize.initialize()
+
+ from modules import shared, ui_tempdir, script_callbacks, ui, progress, ui_extra_networks
while 1:
if shared.opts.clean_temp_dir_at_start:
ui_tempdir.cleanup_tmpdr()
startup_timer.record("cleanup temp dir")
- modules.script_callbacks.before_ui_callback()
+ script_callbacks.before_ui_callback()
startup_timer.record("scripts before_ui_callback")
- shared.demo = modules.ui.create_ui()
+ shared.demo = ui.create_ui()
startup_timer.record("create ui")
if not cmd_opts.no_gradio_queue:
shared.demo.queue(64)
- gradio_auth_creds = list(get_gradio_auth_creds()) or None
+ gradio_auth_creds = list(initialize_util.get_gradio_auth_creds()) or None
+
+ auto_launch_browser = False
+ if os.getenv('SD_WEBUI_RESTARTING') != '1':
+ if shared.opts.auto_launch_browser == "Remote" or cmd_opts.autolaunch:
+ auto_launch_browser = True
+ elif shared.opts.auto_launch_browser == "Local":
+ auto_launch_browser = not any([cmd_opts.listen, cmd_opts.share, cmd_opts.ngrok])
app, local_url, share_url = shared.demo.launch(
share=cmd_opts.share,
- server_name=server_name,
+ server_name=initialize_util.gradio_server_name(),
server_port=cmd_opts.port,
ssl_keyfile=cmd_opts.tls_keyfile,
ssl_certfile=cmd_opts.tls_certfile,
ssl_verify=cmd_opts.disable_tls_verify,
debug=cmd_opts.gradio_debug,
auth=gradio_auth_creds,
- inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING') != '1',
+ inbrowser=auto_launch_browser,
prevent_thread_lock=True,
allowed_paths=cmd_opts.gradio_allowed_path,
app_kwargs={
@@ -390,9 +95,6 @@ def webui():
root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else "",
)
- # after initial launch, disable --autolaunch for subsequent restarts
- cmd_opts.autolaunch = False
-
startup_timer.record("gradio launch")
# gradio uses a very open CORS policy via app.user_middleware, which makes it possible for
@@ -401,10 +103,10 @@ def webui():
# running its code. We disable this here. Suggested by RyotaK.
app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']
- setup_middleware(app)
+ initialize_util.setup_middleware(app)
- modules.progress.setup_progress_api(app)
- modules.ui.setup_ui_api(app)
+ progress.setup_progress_api(app)
+ ui.setup_ui_api(app)
if launch_api:
create_api(app)
@@ -414,7 +116,7 @@ def webui():
startup_timer.record("add APIs")
with startup_timer.subcategory("app_started_callback"):
- modules.script_callbacks.app_started_callback(shared.demo, app)
+ script_callbacks.app_started_callback(shared.demo, app)
timer.startup_record = startup_timer.dump()
print(f"Startup time: {startup_timer.summary()}.")
@@ -437,18 +139,23 @@ def webui():
shared.demo.close()
break
+ # disable auto-launching the browser on subsequent UI reloads
+ os.environ.setdefault('SD_WEBUI_RESTARTING', '1')
+
print('Restarting UI...')
shared.demo.close()
time.sleep(0.5)
startup_timer.reset()
- modules.script_callbacks.app_reload_callback()
+ script_callbacks.app_reload_callback()
startup_timer.record("app reload callback")
- modules.script_callbacks.script_unloaded_callback()
+ script_callbacks.script_unloaded_callback()
startup_timer.record("scripts unloaded callback")
- initialize_rest(reload_script_modules=True)
+ initialize.initialize_rest(reload_script_modules=True)
if __name__ == "__main__":
+ from modules.shared_cmd_options import cmd_opts
+
if cmd_opts.nowebui:
api_only()
else:
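Taken together, the launch changes replace the old `cmd_opts.autolaunch = False` reset with an environment marker: `SD_WEBUI_RESTARTING` is set before a UI reload, and the auto-launch decision consults it along with the new `auto_launch_browser` option. A condensed sketch of that decision — parameter names are stand-ins for the `shared.opts` and `cmd_opts` fields used above:

```python
# Condensed sketch of the new auto-launch decision; parameters stand in
# for shared.opts.auto_launch_browser and the cmd_opts flags.
import os

def should_open_browser(auto_launch_browser, autolaunch, listen, share, ngrok):
    if os.getenv("SD_WEBUI_RESTARTING") == "1":    # UI reload, not first start
        return False
    if auto_launch_browser == "Remote" or autolaunch:
        return True
    if auto_launch_browser == "Local":
        return not any([listen, share, ngrok])
    return False                                   # "Disable" and anything else

# before reloading the UI, the loop marks the process so later launches
# skip opening a browser tab:
os.environ.setdefault("SD_WEBUI_RESTARTING", "1")
```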