-rw-r--r--  .gitignore                                      |   1
-rw-r--r--  javascript/contextMenus.js                      |   2
-rw-r--r--  javascript/dragdrop.js                          |   5
-rw-r--r--  javascript/hints.js                             |   9
-rw-r--r--  javascript/imageParams.js                       |  19
-rw-r--r--  javascript/images_history.js                    | 198
-rw-r--r--  javascript/notification.js                      |   2
-rw-r--r--  javascript/ui.js                                |   8
-rw-r--r--  launch.py                                       |   2
-rw-r--r--  modules/deepbooru.py                            |  26
-rw-r--r--  modules/devices.py                              |   2
-rw-r--r--  modules/extras.py                               |  46
-rw-r--r--  modules/generation_parameters_copypaste.py      |   9
-rw-r--r--  modules/hypernetworks/hypernetwork.py           |  29
-rw-r--r--  modules/images.py                               |  24
-rw-r--r--  modules/images_history.py                       | 165
-rw-r--r--  modules/interrogate.py                          |  21
-rw-r--r--  modules/processing.py                           |  75
-rw-r--r--  modules/safe.py                                 |   9
-rw-r--r--  modules/sd_models.py                            |   7
-rw-r--r--  modules/shared.py                               |  25
-rw-r--r--  modules/textual_inversion/preprocess.py         |   4
-rw-r--r--  modules/textual_inversion/textual_inversion.py  |  25
-rw-r--r--  modules/txt2img.py                              |   5
-rw-r--r--  modules/ui.py                                   | 184
-rw-r--r--  scripts/img2imgalt.py                           |  36
-rw-r--r--  scripts/xy_grid.py                              |  68
-rw-r--r--  style.css                                       |  31
-rw-r--r--  webui.py                                        |   2
29 files changed, 863 insertions, 176 deletions
@@ -17,6 +17,7 @@ __pycache__
 /webui.settings.bat
 /embeddings
 /styles.csv
+/params.txt
 /styles.csv.bak
 /webui-user.bat
 /webui-user.sh
diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js
index 7636c4b3..fe67c42e 100644
--- a/javascript/contextMenus.js
+++ b/javascript/contextMenus.js
@@ -94,7 +94,7 @@ contextMenuInit = function(){
     }
gradioApp().addEventListener("click", function(e) {
let source = e.composedPath()[0]
- if(source.id && source.indexOf('check_progress')>-1){
+ if(source.id && source.id.indexOf('check_progress')>-1){
return
}
diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js
index 5aac57f7..fe0185a5 100644
--- a/javascript/dragdrop.js
+++ b/javascript/dragdrop.js
@@ -43,7 +43,7 @@ function dropReplaceImage( imgWrap, files ) {
 window.document.addEventListener('dragover', e => {
     const target = e.composedPath()[0];
     const imgWrap = target.closest('[data-testid="image"]');
-    if ( !imgWrap ) {
+    if ( !imgWrap && target.placeholder != "Prompt") {
         return;
     }
     e.stopPropagation();
@@ -53,6 +53,9 @@ window.document.addEventListener('dragover', e => {
 
 window.document.addEventListener('drop', e => {
     const target = e.composedPath()[0];
+    if (target.placeholder === "Prompt") {
+        return;
+    }
     const imgWrap = target.closest('[data-testid="image"]');
     if ( !imgWrap ) {
         return;
diff --git a/javascript/hints.js b/javascript/hints.js
index d51ee14c..8fec907d 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -14,7 +14,7 @@ titles = {
     "\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time",
     "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed",
     "\u{1f3a8}": "Add a random artist to the prompt.",
-    "\u2199\ufe0f": "Read generation parameters from prompt into user interface.",
+    "\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
     "\u{1f4c2}": "Open images output directory",
 
     "Inpaint a part of image": "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt",
@@ -83,7 +83,12 @@ titles = {
     "Do not add watermark to images": "If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.",
 
     "Filename word regex": "This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.",
-    "Filename join string": "This string will be used to hoin split words into a single line if the option above is enabled.",
+    "Filename join string": "This string will be used to join split words into a single line if the option above is enabled.",
+
+    "Quicksettings list": "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.",
+
+    "Weighted sum": "Result = A * (1 - M) + B * M",
+    "Add difference": "Result = A + (B - C) * M",
 }
diff --git a/javascript/imageParams.js b/javascript/imageParams.js
new file mode 100644
index 00000000..4a7b0900
--- /dev/null
+++ b/javascript/imageParams.js
@@ -0,0 +1,19 @@
+window.onload = (function(){
+    window.addEventListener('drop', e => {
+        const target = e.composedPath()[0];
+        const idx = selected_gallery_index();
+        if (target.placeholder != "Prompt") return;
+
+        let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image";
+
+        e.stopPropagation();
+        e.preventDefault();
+        const imgParent = gradioApp().getElementById(prompt_target);
+        const files = e.dataTransfer.files;
+        const fileInput = imgParent.querySelector('input[type="file"]');
+        if ( fileInput ) {
+            fileInput.files = files;
+            fileInput.dispatchEvent(new Event('change'));
+        }
+    });
+});
diff --git a/javascript/images_history.js b/javascript/images_history.js
new file mode 100644
index 00000000..3a20056b
--- /dev/null
+++ b/javascript/images_history.js
@@ -0,0 +1,198 @@
+var images_history_click_image = function(){
+    if (!this.classList.contains("transform")){
+        var gallery = images_history_get_parent_by_class(this, "images_history_cantainor");
+        var buttons = gallery.querySelectorAll(".gallery-item");
+        var i = 0;
+        var hidden_list = [];
+        buttons.forEach(function(e){
+            if (e.style.display == "none"){
+                hidden_list.push(i);
+            }
+            i += 1;
+        })
+        if (hidden_list.length > 0){
+            setTimeout(images_history_hide_buttons, 10, hidden_list, gallery);
+        }
+    }
+    images_history_set_image_info(this);
+}
+
+var images_history_click_tab = function(){
+    var tabs_box = gradioApp().getElementById("images_history_tab");
+    if (!tabs_box.classList.contains(this.getAttribute("tabname"))) {
+        gradioApp().getElementById(this.getAttribute("tabname") + "_images_history_renew_page").click();
+        tabs_box.classList.add(this.getAttribute("tabname"))
+    }
+}
+
+function images_history_disabled_del(){
+    gradioApp().querySelectorAll(".images_history_del_button").forEach(function(btn){
+        btn.setAttribute('disabled','disabled');
+    });
+}
+
+function images_history_get_parent_by_class(item, class_name){
+    var parent = item.parentElement;
+    while(!parent.classList.contains(class_name)){
+        parent = parent.parentElement;
+    }
+    return parent;
+}
+
+function images_history_get_parent_by_tagname(item, tagname){
+    var parent = item.parentElement;
+    tagname = tagname.toUpperCase()
+    while(parent.tagName != tagname){
+        console.log(parent.tagName, tagname)
+        parent = parent.parentElement;
+    }
+    return parent;
+}
+
+function images_history_hide_buttons(hidden_list, gallery){
+    var buttons = gallery.querySelectorAll(".gallery-item");
+    var num = 0;
+    buttons.forEach(function(e){
+        if (e.style.display == "none"){
+            num += 1;
+        }
+    });
+    if (num == hidden_list.length){
+        setTimeout(images_history_hide_buttons, 10, hidden_list, gallery);
+    }
+    for( i in hidden_list){
+        buttons[hidden_list[i]].style.display = "none";
+    }
+}
+
+function images_history_set_image_info(button){
+    var buttons = images_history_get_parent_by_tagname(button, "DIV").querySelectorAll(".gallery-item");
+    var index = -1;
+    var i = 0;
+    buttons.forEach(function(e){
+        if(e == button){
+            index = i;
+        }
+        if(e.style.display != "none"){
+            i += 1;
+        }
+    });
+    var gallery = images_history_get_parent_by_class(button, "images_history_cantainor");
+    var set_btn = gallery.querySelector(".images_history_set_index");
+    var curr_idx = set_btn.getAttribute("img_index", index);
+    if (curr_idx != index) {
+        set_btn.setAttribute("img_index", index);
+        images_history_disabled_del();
+    }
+    set_btn.click();
+
+}
+
+function images_history_get_current_img(tabname, image_path, files){
+    return [
+        gradioApp().getElementById(tabname + '_images_history_set_index').getAttribute("img_index"),
+        image_path,
+        files
+    ];
+}
+
+function images_history_delete(del_num, tabname, img_path, img_file_name, page_index, filenames, image_index){
+    image_index = parseInt(image_index);
+    var tab = gradioApp().getElementById(tabname + '_images_history');
+    var set_btn = tab.querySelector(".images_history_set_index");
+    var buttons = [];
+    tab.querySelectorAll(".gallery-item").forEach(function(e){
+        if (e.style.display != 'none'){
+            buttons.push(e);
+        }
+    });
+    var img_num = buttons.length / 2;
+    if (img_num <= del_num){
+        setTimeout(function(tabname){
+            gradioApp().getElementById(tabname + '_images_history_renew_page').click();
+        }, 30, tabname);
+    } else {
+        var next_img
+        for (var i = 0; i < del_num; i++){
+            if (image_index + i < image_index + img_num){
+                buttons[image_index + i].style.display = 'none';
+                buttons[image_index + img_num + 1].style.display = 'none';
+                next_img = image_index + i + 1
+            }
+        }
+        var bnt;
+        if (next_img >= img_num){
+            btn = buttons[image_index - del_num];
+        } else {
+            btn = buttons[next_img];
+        }
+        setTimeout(function(btn){btn.click()}, 30, btn);
+    }
+    images_history_disabled_del();
+    return [del_num, tabname, img_path, img_file_name, page_index, filenames, image_index];
+}
+
+function images_history_turnpage(img_path, page_index, image_index, tabname){
+    var buttons = gradioApp().getElementById(tabname + '_images_history').querySelectorAll(".gallery-item");
+    buttons.forEach(function(elem) {
+        elem.style.display = 'block';
+    })
+    return [img_path, page_index, image_index, tabname];
+}
+
+function images_history_enable_del_buttons(){
+    gradioApp().querySelectorAll(".images_history_del_button").forEach(function(btn){
+        btn.removeAttribute('disabled');
+    })
+}
+
+function images_history_init(){
+    var load_txt2img_button = gradioApp().getElementById('txt2img_images_history_renew_page')
+    if (load_txt2img_button){
+        for (var i in images_history_tab_list ){
+            tab = images_history_tab_list[i];
+            gradioApp().getElementById(tab + '_images_history').classList.add("images_history_cantainor");
+            gradioApp().getElementById(tab + '_images_history_set_index').classList.add("images_history_set_index");
+            gradioApp().getElementById(tab + '_images_history_del_button').classList.add("images_history_del_button");
+            gradioApp().getElementById(tab + '_images_history_gallery').classList.add("images_history_gallery");
+
+        }
+        var tabs_box = gradioApp().getElementById("tab_images_history").querySelector("div").querySelector("div").querySelector("div");
+        tabs_box.setAttribute("id", "images_history_tab");
+        var tab_btns = tabs_box.querySelectorAll("button");
+        for (var i in images_history_tab_list){
+            var tabname = images_history_tab_list[i]
+            tab_btns[i].setAttribute("tabname", tabname);
+            tab_btns[i].addEventListener('click', images_history_click_tab);
+        }
+        tabs_box.classList.add(images_history_tab_list[0]);
+        load_txt2img_button.click();
+    } else {
+        setTimeout(images_history_init, 500);
+    }
+}
+
+var images_history_tab_list = ["txt2img", "img2img", "extras"];
+setTimeout(images_history_init, 500);
+document.addEventListener("DOMContentLoaded", function() {
+    var mutationObserver = new MutationObserver(function(m){
+        for (var i in images_history_tab_list ){
+            let tabname = images_history_tab_list[i]
+            var buttons = gradioApp().querySelectorAll('#' + tabname + '_images_history .gallery-item');
+            buttons.forEach(function(bnt){
+                bnt.addEventListener('click', images_history_click_image, true);
+            });
+            var cls_btn = gradioApp().getElementById(tabname + '_images_history_gallery').querySelector("svg");
+            if (cls_btn){
+                cls_btn.addEventListener('click', function(){
+                    gradioApp().getElementById(tabname + '_images_history_renew_page').click();
+                }, false);
+            }
+
+        }
+    });
+    mutationObserver.observe( gradioApp(), { childList:true, subtree:true });
+
+});
diff --git a/javascript/notification.js b/javascript/notification.js
index bdf614ad..f96de313 100644
--- a/javascript/notification.js
+++ b/javascript/notification.js
@@ -36,7 +36,7 @@ onUiUpdate(function(){
         const notification = new Notification(
             'Stable Diffusion',
             {
-                body: `Generated ${imgs.size > 1 ? imgs.size - 1 : 1} image${imgs.size > 1 ? 's' : ''}`,
+                body: `Generated ${imgs.size > 1 ? imgs.size - opts.return_grid : 1} image${imgs.size > 1 ? 's' : ''}`,
                 icon: headImg,
                 image: headImg,
             }
diff --git a/javascript/ui.js b/javascript/ui.js
index 4100944e..0f8fe68e 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -33,27 +33,27 @@ function args_to_array(args){
 }
 
 function switch_to_txt2img(){
-    gradioApp().querySelectorAll('button')[0].click();
+    gradioApp().querySelector('#tabs').querySelectorAll('button')[0].click();
 
     return args_to_array(arguments);
 }
 
 function switch_to_img2img_img2img(){
-    gradioApp().querySelectorAll('button')[1].click();
+    gradioApp().querySelector('#tabs').querySelectorAll('button')[1].click();
     gradioApp().getElementById('mode_img2img').querySelectorAll('button')[0].click();
 
     return args_to_array(arguments);
 }
 
 function switch_to_img2img_inpaint(){
-    gradioApp().querySelectorAll('button')[1].click();
+    gradioApp().querySelector('#tabs').querySelectorAll('button')[1].click();
     gradioApp().getElementById('mode_img2img').querySelectorAll('button')[1].click();
 
     return args_to_array(arguments);
 }
 
 function switch_to_extras(){
-    gradioApp().querySelectorAll('button')[2].click();
+    gradioApp().querySelector('#tabs').querySelectorAll('button')[2].click();
 
     return args_to_array(arguments);
 }
diff --git a/launch.py b/launch.py
@@ -76,7 +76,7 @@ def git_clone(url, dir, name, commithash=None):
             return
run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
- run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commint for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
+ run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index 419e6a9c..f34f3788 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -19,6 +19,7 @@ def get_deepbooru_tags(pil_image):
         release_process()
 
 
+OPT_INCLUDE_RANKS = "include_ranks"
 def create_deepbooru_opts():
     from modules import shared
 
@@ -26,6 +27,7 @@ def create_deepbooru_opts():
         "use_spaces": shared.opts.deepbooru_use_spaces,
         "use_escape": shared.opts.deepbooru_escape,
         "alpha_sort": shared.opts.deepbooru_sort_alpha,
+        OPT_INCLUDE_RANKS: shared.opts.interrogate_return_ranks,
     }
 
 
@@ -113,6 +115,7 @@ def get_deepbooru_tags_from_model(model, tags, pil_image, threshold, deepbooru_o
     alpha_sort = deepbooru_opts['alpha_sort']
     use_spaces = deepbooru_opts['use_spaces']
     use_escape = deepbooru_opts['use_escape']
+    include_ranks = deepbooru_opts['include_ranks']
 
     width = model.input_shape[2]
     height = model.input_shape[1]
@@ -151,19 +154,20 @@ def get_deepbooru_tags_from_model(model, tags, pil_image, threshold, deepbooru_o
     if alpha_sort:
         sort_ndx = 1
 
-    # sort by reverse by likelihood and normal for alpha
+    # sort by reverse by likelihood and normal for alpha, and format tag text as requested
     unsorted_tags_in_theshold.sort(key=lambda y: y[sort_ndx], reverse=(not alpha_sort))
     for weight, tag in unsorted_tags_in_theshold:
-        result_tags_out.append(tag)
+        # note: tag_outformat will still have a colon if include_ranks is True
+        tag_outformat = tag.replace(':', ' ')
+        if use_spaces:
+            tag_outformat = tag_outformat.replace('_', ' ')
+        if use_escape:
+            tag_outformat = re.sub(re_special, r'\\\1', tag_outformat)
+        if include_ranks:
+            tag_outformat = f"({tag_outformat}:{weight:.3f})"
 
-    print('\n'.join(sorted(result_tags_print, reverse=True)))
-
-    tags_text = ', '.join(result_tags_out)
+        result_tags_out.append(tag_outformat)
 
-    if use_spaces:
-        tags_text = tags_text.replace('_', ' ')
-
-    if use_escape:
-        tags_text = re.sub(re_special, r'\\\1', tags_text)
+    print('\n'.join(sorted(result_tags_print, reverse=True)))
 
-    return tags_text.replace(':', ' ')
+    return ', '.join(result_tags_out)
diff --git a/modules/devices.py b/modules/devices.py
index 03ef58f1..eb422583 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -34,7 +34,7 @@ def enable_tf32():
 
 errors.run(enable_tf32, "Enabling TF32")
 
-device = device_gfpgan = device_bsrgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device()
+device = device_interrogate = device_gfpgan = device_bsrgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device()
 dtype = torch.float16
 dtype_vae = torch.float16
 
diff --git a/modules/extras.py b/modules/extras.py
index b24d7de3..f2f5a7b0 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -159,48 +159,52 @@ def run_pnginfo(image):
     return '', geninfo, info
-def run_modelmerger(primary_model_name, secondary_model_name, interp_method, interp_amount, save_as_half, custom_name):
- # Linear interpolation (https://en.wikipedia.org/wiki/Linear_interpolation)
- def weighted_sum(theta0, theta1, alpha):
+def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_name, interp_method, multiplier, save_as_half, custom_name):
+ def weighted_sum(theta0, theta1, theta2, alpha):
return ((1 - alpha) * theta0) + (alpha * theta1)
- # Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
- def sigmoid(theta0, theta1, alpha):
- alpha = alpha * alpha * (3 - (2 * alpha))
- return theta0 + ((theta1 - theta0) * alpha)
-
- # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
- def inv_sigmoid(theta0, theta1, alpha):
- import math
- alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
- return theta0 + ((theta1 - theta0) * alpha)
+ def add_difference(theta0, theta1, theta2, alpha):
+ return theta0 + (theta1 - theta2) * alpha
primary_model_info = sd_models.checkpoints_list[primary_model_name]
secondary_model_info = sd_models.checkpoints_list[secondary_model_name]
+ teritary_model_info = sd_models.checkpoints_list.get(teritary_model_name, None)
print(f"Loading {primary_model_info.filename}...")
primary_model = torch.load(primary_model_info.filename, map_location='cpu')
+ theta_0 = sd_models.get_state_dict_from_checkpoint(primary_model)
print(f"Loading {secondary_model_info.filename}...")
secondary_model = torch.load(secondary_model_info.filename, map_location='cpu')
-
- theta_0 = sd_models.get_state_dict_from_checkpoint(primary_model)
theta_1 = sd_models.get_state_dict_from_checkpoint(secondary_model)
+ if teritary_model_info is not None:
+ print(f"Loading {teritary_model_info.filename}...")
+ teritary_model = torch.load(teritary_model_info.filename, map_location='cpu')
+ theta_2 = sd_models.get_state_dict_from_checkpoint(teritary_model)
+ else:
+ theta_2 = None
+
theta_funcs = {
- "Weighted Sum": weighted_sum,
- "Sigmoid": sigmoid,
- "Inverse Sigmoid": inv_sigmoid,
+ "Weighted sum": weighted_sum,
+ "Add difference": add_difference,
}
theta_func = theta_funcs[interp_method]
print(f"Merging...")
+
for key in tqdm.tqdm(theta_0.keys()):
if 'model' in key and key in theta_1:
- theta_0[key] = theta_func(theta_0[key], theta_1[key], (float(1.0) - interp_amount)) # Need to reverse the interp_amount to match the desired mix ration in the merged checkpoint
+ t2 = (theta_2 or {}).get(key)
+ if t2 is None:
+ t2 = torch.zeros_like(theta_0[key])
+
+ theta_0[key] = theta_func(theta_0[key], theta_1[key], t2, multiplier)
+
if save_as_half:
theta_0[key] = theta_0[key].half()
+ # I believe this part should be discarded, but I'll leave it for now until I am sure
for key in theta_1.keys():
if 'model' in key and key not in theta_0:
theta_0[key] = theta_1[key]
@@ -209,7 +213,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, interp_method, int
     ckpt_dir = shared.cmd_opts.ckpt_dir or sd_models.model_path
- filename = primary_model_info.model_name + '_' + str(round(interp_amount, 2)) + '-' + secondary_model_info.model_name + '_' + str(round((float(1.0) - interp_amount), 2)) + '-' + interp_method.replace(" ", "_") + '-merged.ckpt'
+ filename = primary_model_info.model_name + '_' + str(round(1-multiplier, 2)) + '-' + secondary_model_info.model_name + '_' + str(round(multiplier, 2)) + '-' + interp_method.replace(" ", "_") + '-merged.ckpt'
filename = filename if custom_name == '' else (custom_name + '.ckpt')
output_modelname = os.path.join(ckpt_dir, filename)
@@ -219,4 +223,4 @@ def run_modelmerger(primary_model_name, secondary_model_name, interp_method, int
     sd_models.list_models()
print(f"Checkpoint saved.")
- return ["Checkpoint saved to " + output_modelname] + [gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(3)]
+ return ["Checkpoint saved to " + output_modelname] + [gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)]
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index ac1ba7f4..c27826b6 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -1,5 +1,8 @@
+import os
import re
import gradio as gr
+from modules.shared import script_path
+from modules import shared
re_param_code = r"\s*([\w ]+):\s*([^,]+)(?:,|$)"
re_param = re.compile(re_param_code)
@@ -61,6 +64,12 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
 def connect_paste(button, paste_fields, input_comp, js=None):
def paste_func(prompt):
+ if not prompt and not shared.cmd_opts.hide_ui_dir_config:
+ filename = os.path.join(script_path, "params.txt")
+ if os.path.exists(filename):
+ with open(filename, "r", encoding="utf8") as file:
+ prompt = file.read()
+
params = parse_generation_parameters(prompt)
res = []
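The params.txt fallback feeds whatever text it reads into parse_generation_parameters, which splits the parameter line of an infotext with re_param_code (defined at the top of this file). A quick illustration of what that regex extracts:

    import re

    re_param_code = r"\s*([\w ]+):\s*([^,]+)(?:,|$)"
    line = "Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512"
    print(re.findall(re_param_code, line))
    # [('Steps', '20'), ('Sampler', 'Euler a'), ('CFG scale', '7'),
    #  ('Seed', '965400086'), ('Size', '512x512')]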
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 2751a8c8..edb8cba1 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -19,6 +19,8 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler
 
 class HypernetworkModule(torch.nn.Module):
+ multiplier = 1.0
+
def __init__(self, dim, state_dict=None):
super().__init__()
@@ -37,7 +39,11 @@ class HypernetworkModule(torch.nn.Module):
             self.to(devices.device)
def forward(self, x):
- return x + (self.linear2(self.linear1(x)))
+ return x + (self.linear2(self.linear1(x))) * self.multiplier
+
+
+def apply_strength(value=None):
+ HypernetworkModule.multiplier = value if value is not None else shared.opts.sd_hypernetwork_strength
class Hypernetwork:
@@ -175,7 +181,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None):
     return self.to_out(out)
-def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, write_csv_every, template_file, preview_image_prompt):
+def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
assert hypernetwork_name, 'hypernetwork not selected'
path = shared.hypernetworks.get(hypernetwork_name, None)
@@ -274,20 +280,31 @@ def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory,
         if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0:
last_saved_image = os.path.join(images_dir, f'{hypernetwork_name}-{hypernetwork.step}.png')
- preview_text = entry.cond_text if preview_image_prompt == "" else preview_image_prompt
-
optimizer.zero_grad()
shared.sd_model.cond_stage_model.to(devices.device)
shared.sd_model.first_stage_model.to(devices.device)
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
- prompt=preview_text,
- steps=20,
do_not_save_grid=True,
do_not_save_samples=True,
)
+ if preview_from_txt2img:
+ p.prompt = preview_prompt
+ p.negative_prompt = preview_negative_prompt
+ p.steps = preview_steps
+ p.sampler_index = preview_sampler_index
+ p.cfg_scale = preview_cfg_scale
+ p.seed = preview_seed
+ p.width = preview_width
+ p.height = preview_height
+ else:
+ p.prompt = entry.cond_text
+ p.steps = 20
+
+ preview_text = p.prompt
+
processed = processing.process_images(p)
image = processed.images[0] if len(processed.images)>0 else None
diff --git a/modules/images.py b/modules/images.py index c0a90676..b9589563 100644 --- a/modules/images.py +++ b/modules/images.py @@ -1,4 +1,5 @@ import datetime
+import io
import math
import os
from collections import namedtuple
@@ -23,6 +24,10 @@ def image_grid(imgs, batch_size=1, rows=None):
         rows = opts.n_rows
elif opts.n_rows == 0:
rows = batch_size
+ elif opts.grid_prevent_empty_spots:
+ rows = math.floor(math.sqrt(len(imgs)))
+ while len(imgs) % rows != 0:
+ rows -= 1
else:
rows = math.sqrt(len(imgs))
rows = round(rows)
@@ -463,3 +468,22 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
         txt_fullfn = None
return fullfn, txt_fullfn
+
+
+def image_data(data):
+ try:
+ image = Image.open(io.BytesIO(data))
+ textinfo = image.text["parameters"]
+ return textinfo, None
+ except Exception:
+ pass
+
+ try:
+ text = data.decode('utf8')
+ assert len(text) < 10000
+ return text, None
+
+ except Exception:
+ pass
+
+ return '', None
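The new grid_prevent_empty_spots branch in image_grid picks the largest row count at or below sqrt(n) that divides the image count evenly, so no grid cell is left blank. Extracted into a standalone sketch:

    import math

    def rows_without_empty_spots(n_images):
        rows = math.floor(math.sqrt(n_images))
        while n_images % rows != 0:
            rows -= 1
        return rows

    for n in (9, 10, 12, 7):
        print(n, "->", rows_without_empty_spots(n))
    # 9 -> 3 (3x3), 10 -> 2 (2x5), 12 -> 3 (3x4), 7 -> 1 (1x7)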
diff --git a/modules/images_history.py b/modules/images_history.py
new file mode 100644
index 00000000..723f5301
--- /dev/null
+++ b/modules/images_history.py
@@ -0,0 +1,165 @@
+import os
+import shutil
+def traverse_all_files(output_dir, image_list, curr_dir=None):
+    curr_path = output_dir if curr_dir is None else os.path.join(output_dir, curr_dir)
+    try:
+        f_list = os.listdir(curr_path)
+    except:
+        if curr_dir[-10:].rfind(".") > 0 and curr_dir[-4:] != ".txt":
+            image_list.append(curr_dir)
+        return image_list
+    for file in f_list:
+        file = file if curr_dir is None else os.path.join(curr_dir, file)
+        file_path = os.path.join(curr_path, file)
+        if file[-4:] == ".txt":
+            pass
+        elif os.path.isfile(file_path) and file[-10:].rfind(".") > 0:
+            image_list.append(file)
+        else:
+            image_list = traverse_all_files(output_dir, image_list, file)
+    return image_list
+
+
+def get_recent_images(dir_name, page_index, step, image_index, tabname):
+    page_index = int(page_index)
+    f_list = os.listdir(dir_name)
+    image_list = []
+    image_list = traverse_all_files(dir_name, image_list)
+    image_list = sorted(image_list, key=lambda file: -os.path.getctime(os.path.join(dir_name, file)))
+    num = 48 if tabname != "extras" else 12
+    max_page_index = len(image_list) // num + 1
+    page_index = max_page_index if page_index == -1 else page_index + step
+    page_index = 1 if page_index < 1 else page_index
+    page_index = max_page_index if page_index > max_page_index else page_index
+    idx_frm = (page_index - 1) * num
+    image_list = image_list[idx_frm:idx_frm + num]
+    image_index = int(image_index)
+    if image_index < 0 or image_index > len(image_list) - 1:
+        current_file = None
+        hidden = None
+    else:
+        current_file = image_list[int(image_index)]
+        hidden = os.path.join(dir_name, current_file)
+    return [os.path.join(dir_name, file) for file in image_list], page_index, image_list, current_file, hidden, ""
+
+def first_page_click(dir_name, page_index, image_index, tabname):
+    return get_recent_images(dir_name, 1, 0, image_index, tabname)
+def end_page_click(dir_name, page_index, image_index, tabname):
+    return get_recent_images(dir_name, -1, 0, image_index, tabname)
+def prev_page_click(dir_name, page_index, image_index, tabname):
+    return get_recent_images(dir_name, page_index, -1, image_index, tabname)
+def next_page_click(dir_name, page_index, image_index, tabname):
+    return get_recent_images(dir_name, page_index, 1, image_index, tabname)
+def page_index_change(dir_name, page_index, image_index, tabname):
+    return get_recent_images(dir_name, page_index, 0, image_index, tabname)
+
+def show_image_info(num, image_path, filenames):
+    #print(f"select image {num}")
+    file = filenames[int(num)]
+    return file, num, os.path.join(image_path, file)
+def delete_image(delete_num, tabname, dir_name, name, page_index, filenames, image_index):
+    if name == "":
+        return filenames, delete_num
+    else:
+        delete_num = int(delete_num)
+        index = list(filenames).index(name)
+        i = 0
+        new_file_list = []
+        for name in filenames:
+            if i >= index and i < index + delete_num:
+                path = os.path.join(dir_name, name)
+                if os.path.exists(path):
+                    print(f"Delete file {path}")
+                    os.remove(path)
+                    txt_file = os.path.splitext(path)[0] + ".txt"
+                    if os.path.exists(txt_file):
+                        os.remove(txt_file)
+                else:
+                    print(f"Not exists file {path}")
+            else:
+                new_file_list.append(name)
+            i += 1
+        return new_file_list, 1
+
+def show_images_history(gr, opts, tabname, run_pnginfo, switch_dict):
+    if tabname == "txt2img":
+        dir_name = opts.outdir_txt2img_samples
+    elif tabname == "img2img":
+        dir_name = opts.outdir_img2img_samples
+    elif tabname == "extras":
+        dir_name = opts.outdir_extras_samples
+    d = dir_name.split("/")
+    dir_name = d[0]
+    for p in d[1:]:
+        dir_name = os.path.join(dir_name, p)
+    with gr.Row():
+        renew_page = gr.Button('Renew Page', elem_id=tabname + "_images_history_renew_page")
+        first_page = gr.Button('First Page')
+        prev_page = gr.Button('Prev Page')
+        page_index = gr.Number(value=1, label="Page Index")
+        next_page = gr.Button('Next Page')
+        end_page = gr.Button('End Page')
+    with gr.Row(elem_id=tabname + "_images_history"):
+        with gr.Row():
+            with gr.Column(scale=2):
+                history_gallery = gr.Gallery(show_label=False, elem_id=tabname + "_images_history_gallery").style(grid=6)
+                with gr.Row():
+                    delete_num = gr.Number(value=1, interactive=True, label="number of images to delete consecutively next")
+                    delete = gr.Button('Delete', elem_id=tabname + "_images_history_del_button")
+            with gr.Column():
+                with gr.Row():
+                    pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
+                    pnginfo_send_to_img2img = gr.Button('Send to img2img')
+                with gr.Row():
+                    with gr.Column():
+                        img_file_info = gr.Textbox(label="Generate Info", interactive=False)
+                        img_file_name = gr.Textbox(label="File Name", interactive=False)
+                with gr.Row():
+                    # hiden items
+                    img_path = gr.Textbox(dir_name.rstrip("/") , visible=False)
+                    tabname_box = gr.Textbox(tabname, visible=False)
+                    image_index = gr.Textbox(value=-1, visible=False)
+                    set_index = gr.Button('set_index', elem_id=tabname + "_images_history_set_index", visible=False)
+                    filenames = gr.State()
+                    hidden = gr.Image(type="pil", visible=False)
+                    info1 = gr.Textbox(visible=False)
+                    info2 = gr.Textbox(visible=False)
+
+    # turn pages
+    gallery_inputs = [img_path, page_index, image_index, tabname_box]
+    gallery_outputs = [history_gallery, page_index, filenames, img_file_name, hidden, img_file_name]
+
+    first_page.click(first_page_click, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs)
+    next_page.click(next_page_click, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs)
+    prev_page.click(prev_page_click, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs)
+    end_page.click(end_page_click, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs)
+    page_index.submit(page_index_change, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs)
+    renew_page.click(page_index_change, _js="images_history_turnpage", inputs=gallery_inputs, outputs=gallery_outputs)
+    #page_index.change(page_index_change, inputs=[tabname_box, img_path, page_index], outputs=[history_gallery, page_index])
+
+    #other funcitons
+    set_index.click(show_image_info, _js="images_history_get_current_img", inputs=[tabname_box, img_path, filenames], outputs=[img_file_name, image_index, hidden])
+    img_file_name.change(fn=None, _js="images_history_enable_del_buttons", inputs=None, outputs=None)
+    delete.click(delete_image, _js="images_history_delete", inputs=[delete_num, tabname_box, img_path, img_file_name, page_index, filenames, image_index], outputs=[filenames, delete_num])
+    hidden.change(fn=run_pnginfo, inputs=[hidden], outputs=[info1, img_file_info, info2])
+
+    #pnginfo.click(fn=run_pnginfo, inputs=[hidden], outputs=[info1, img_file_info, info2])
+    switch_dict["fn"](pnginfo_send_to_txt2img, switch_dict["t2i"], img_file_info, 'switch_to_txt2img')
+    switch_dict["fn"](pnginfo_send_to_img2img, switch_dict["i2i"], img_file_info, 'switch_to_img2img_img2img')
+
+
+def create_history_tabs(gr, opts, run_pnginfo, switch_dict):
+    with gr.Blocks(analytics_enabled=False) as images_history:
+        with gr.Tabs() as tabs:
+            with gr.Tab("txt2img history"):
+                with gr.Blocks(analytics_enabled=False) as images_history_txt2img:
+                    show_images_history(gr, opts, "txt2img", run_pnginfo, switch_dict)
+            with gr.Tab("img2img history"):
+                with gr.Blocks(analytics_enabled=False) as images_history_img2img:
+                    show_images_history(gr, opts, "img2img", run_pnginfo, switch_dict)
+            with gr.Tab("extras history"):
+                with gr.Blocks(analytics_enabled=False) as images_history_img2img:
+                    show_images_history(gr, opts, "extras", run_pnginfo, switch_dict)
+    return images_history
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 635e266e..9263d65a 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -55,7 +55,7 @@ class InterrogateModels:
         model, preprocess = clip.load(clip_model_name)
model.eval()
- model = model.to(shared.device)
+ model = model.to(devices.device_interrogate)
return model, preprocess
@@ -65,14 +65,14 @@ class InterrogateModels:
         if not shared.cmd_opts.no_half:
self.blip_model = self.blip_model.half()
- self.blip_model = self.blip_model.to(shared.device)
+ self.blip_model = self.blip_model.to(devices.device_interrogate)
if self.clip_model is None:
self.clip_model, self.clip_preprocess = self.load_clip_model()
if not shared.cmd_opts.no_half:
self.clip_model = self.clip_model.half()
- self.clip_model = self.clip_model.to(shared.device)
+ self.clip_model = self.clip_model.to(devices.device_interrogate)
self.dtype = next(self.clip_model.parameters()).dtype
@@ -99,11 +99,11 @@ class InterrogateModels:
             text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]
top_count = min(top_count, len(text_array))
- text_tokens = clip.tokenize([text for text in text_array], truncate=True).to(shared.device)
+ text_tokens = clip.tokenize([text for text in text_array], truncate=True).to(devices.device_interrogate)
text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
text_features /= text_features.norm(dim=-1, keepdim=True)
- similarity = torch.zeros((1, len(text_array))).to(shared.device)
+ similarity = torch.zeros((1, len(text_array))).to(devices.device_interrogate)
for i in range(image_features.shape[0]):
similarity += (100.0 * image_features[i].unsqueeze(0) @ text_features.T).softmax(dim=-1)
similarity /= image_features.shape[0]
@@ -116,14 +116,14 @@ class InterrogateModels:
             transforms.Resize((blip_image_eval_size, blip_image_eval_size), interpolation=InterpolationMode.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
- ])(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)
+ ])(pil_image).unsqueeze(0).type(self.dtype).to(devices.device_interrogate)
with torch.no_grad():
caption = self.blip_model.generate(gpu_image, sample=False, num_beams=shared.opts.interrogate_clip_num_beams, min_length=shared.opts.interrogate_clip_min_length, max_length=shared.opts.interrogate_clip_max_length)
return caption[0]
- def interrogate(self, pil_image):
+ def interrogate(self, pil_image, include_ranks=False):
res = None
try:
@@ -140,7 +140,7 @@ class InterrogateModels:
                 res = caption
- clip_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)
+ clip_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(devices.device_interrogate)
precision_scope = torch.autocast if shared.cmd_opts.precision == "autocast" else contextlib.nullcontext
with torch.no_grad(), precision_scope("cuda"):
@@ -156,7 +156,10 @@ class InterrogateModels:
             for name, topn, items in self.categories:
matches = self.rank(image_features, items, top_count=topn)
for match, score in matches:
- res += ", " + match
+                    if include_ranks:
+                        res += f", ({match}:{score})"
+                    else:
+                        res += ", " + match
except Exception:
print(f"Error interrogating", file=sys.stderr)
diff --git a/modules/processing.py b/modules/processing.py
index 698b3069..100a259f 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -324,6 +324,10 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
     else:
assert p.prompt is not None
+ with open(os.path.join(shared.script_path, "params.txt"), "w", encoding="utf8") as file:
+ processed = Processed(p, [], p.seed, "")
+ file.write(processed.infotext(p, 0))
+
devices.torch_gc()
seed = get_fixed_seed(p.seed)
@@ -502,11 +506,12 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
     firstphase_width_truncated = 0
firstphase_height_truncated = 0
- def __init__(self, enable_hr=False, scale_latent=True, denoising_strength=0.75, **kwargs):
+ def __init__(self, enable_hr=False, denoising_strength=0.75, firstphase_width=512, firstphase_height=512, **kwargs):
super().__init__(**kwargs)
self.enable_hr = enable_hr
- self.scale_latent = scale_latent
self.denoising_strength = denoising_strength
+ self.firstphase_width = firstphase_width
+ self.firstphase_height = firstphase_height
def init(self, all_prompts, all_seeds, all_subseeds):
if self.enable_hr:
@@ -515,15 +520,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         else:
state.job_count = state.job_count * 2
- desired_pixel_count = 512 * 512
- actual_pixel_count = self.width * self.height
- scale = math.sqrt(desired_pixel_count / actual_pixel_count)
-
- self.firstphase_width = math.ceil(scale * self.width / 64) * 64
- self.firstphase_height = math.ceil(scale * self.height / 64) * 64
- self.firstphase_width_truncated = int(scale * self.width)
- self.firstphase_height_truncated = int(scale * self.height)
-
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
@@ -532,39 +528,46 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
             samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning)
return samples
+ self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}"
+
x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning)
- truncate_x = (self.firstphase_width - self.firstphase_width_truncated) // opt_f
- truncate_y = (self.firstphase_height - self.firstphase_height_truncated) // opt_f
+ truncate_x = 0
+ truncate_y = 0
+ width_ratio = self.width/self.firstphase_width
+ height_ratio = self.height/self.firstphase_height
+
+ if width_ratio > height_ratio:
+ truncate_y = int((self.width - self.firstphase_width) / width_ratio / height_ratio / opt_f)
+ elif width_ratio < height_ratio:
+ truncate_x = int((self.height - self.firstphase_height) / width_ratio / height_ratio / opt_f)
+
samples = samples[:, :, truncate_y//2:samples.shape[2]-truncate_y//2, truncate_x//2:samples.shape[3]-truncate_x//2]
- if self.scale_latent:
- samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+ decoded_samples = decode_first_stage(self.sd_model, samples)
+
+ if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None":
+ decoded_samples = torch.nn.functional.interpolate(decoded_samples, size=(self.height, self.width), mode="bilinear")
else:
- decoded_samples = decode_first_stage(self.sd_model, samples)
+ lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
- if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None":
- decoded_samples = torch.nn.functional.interpolate(decoded_samples, size=(self.height, self.width), mode="bilinear")
- else:
- lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)
-
- batch_images = []
- for i, x_sample in enumerate(lowres_samples):
- x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
- x_sample = x_sample.astype(np.uint8)
- image = Image.fromarray(x_sample)
- image = images.resize_image(0, image, self.width, self.height)
- image = np.array(image).astype(np.float32) / 255.0
- image = np.moveaxis(image, 2, 0)
- batch_images.append(image)
-
- decoded_samples = torch.from_numpy(np.array(batch_images))
- decoded_samples = decoded_samples.to(shared.device)
- decoded_samples = 2. * decoded_samples - 1.
-
- samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
+ batch_images = []
+ for i, x_sample in enumerate(lowres_samples):
+ x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
+ x_sample = x_sample.astype(np.uint8)
+ image = Image.fromarray(x_sample)
+ image = images.resize_image(0, image, self.width, self.height)
+ image = np.array(image).astype(np.float32) / 255.0
+ image = np.moveaxis(image, 2, 0)
+ batch_images.append(image)
+
+ decoded_samples = torch.from_numpy(np.array(batch_images))
+ decoded_samples = decoded_samples.to(shared.device)
+ decoded_samples = 2. * decoded_samples - 1.
+
+ samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
shared.state.nextjob()
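To see what the new truncate math does, here is a worked example: a 768x512 target with a 512x512 first phase has width_ratio 1.5 and height_ratio 1.0, so rows are cropped from the first-pass latent before upscaling (opt_f is the 8x VAE downscale factor; the values below mirror the code above):

    opt_f = 8
    width, height = 768, 512
    firstphase_width, firstphase_height = 512, 512

    width_ratio = width / firstphase_width      # 1.5
    height_ratio = height / firstphase_height   # 1.0

    truncate_x = truncate_y = 0
    if width_ratio > height_ratio:
        truncate_y = int((width - firstphase_width) / width_ratio / height_ratio / opt_f)
    elif width_ratio < height_ratio:
        truncate_x = int((height - firstphase_height) / width_ratio / height_ratio / opt_f)

    print(truncate_x, truncate_y)  # 0 21 -> about 10 latent rows trimmed per side

The crop matches the first-pass latent's aspect ratio to the target before the decode-upscale-reencode path runs, instead of the old behavior of rescaling the whole latent.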
diff --git a/modules/safe.py b/modules/safe.py
index 20be16a5..399165a1 100644
--- a/modules/safe.py
+++ b/modules/safe.py
@@ -96,11 +96,18 @@ def load(filename, *args, **kwargs):
         if not shared.cmd_opts.disable_safe_unpickle:
check_pt(filename)
+ except pickle.UnpicklingError:
+ print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+ print(f"-----> !!!! The file is most likely corrupted !!!! <-----", file=sys.stderr)
+ print(f"You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", file=sys.stderr)
+ return None
+
except Exception:
print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
print(f"\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
- print(f"You can skip this check with --disable-safe-unpickle commandline argument.", file=sys.stderr)
+ print(f"You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr)
return None
return unsafe_torch_load(filename, *args, **kwargs)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 0a55b4c3..3a01c93d 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -134,7 +134,8 @@ def load_model_weights(model, checkpoint_info):
     print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
- pl_sd = torch.load(checkpoint_file, map_location="cpu")
+ pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
+
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
@@ -158,7 +159,9 @@ def load_model_weights(model, checkpoint_info):
         if os.path.exists(vae_file):
print(f"Loading VAE weights from: {vae_file}")
- vae_ckpt = torch.load(vae_file, map_location="cpu")
+
+ vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
+
vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
model.first_stage_model.load_state_dict(vae_dict)
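The vae_dict comprehension above drops training-only keys before loading: anything whose key starts with "loss" belongs to the VAE's training loss module, not to inference. In isolation, with dummy data:

    vae_ckpt = {"state_dict": {"encoder.w": 1, "loss.logvar": 2, "decoder.w": 3}}
    vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
    print(vae_dict)  # {'encoder.w': 1, 'decoder.w': 3}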
diff --git a/modules/shared.py b/modules/shared.py
index 78b73aae..695d29b6 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -13,7 +13,7 @@ import modules.memmon
 import modules.sd_models
import modules.styles
import modules.devices as devices
-from modules import sd_samplers
+from modules import sd_samplers, sd_models
from modules.hypernetworks import hypernetwork
from modules.paths import models_path, script_path, sd_path
@@ -34,6 +34,7 @@ parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_
 parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
+parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
@@ -54,7 +55,7 @@ parser.add_argument("--opt-split-attention", action='store_true', help="force-en
 parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
-parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU as torch device for specified modules", default=[])
+parser.add_argument("--use-cpu", nargs='+',choices=['all', 'sd', 'interrogate', 'gfpgan', 'bsrgan', 'esrgan', 'scunet', 'codeformer'], help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
@@ -76,10 +77,11 @@ parser.add_argument("--disable-safe-unpickle", action='store_true', help="disabl
 cmd_opts = parser.parse_args()
-devices.device, devices.device_gfpgan, devices.device_bsrgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
-(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'])
+devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_bsrgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
+(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'bsrgan', 'esrgan', 'scunet', 'codeformer'])
device = devices.device
+weight_load_location = None if cmd_opts.lowram else "cpu"
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
@@ -145,14 +147,14 @@ def realesrgan_models_names():
 
 class OptionInfo:
- def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, show_on_main_page=False):
+ def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, show_on_main_page=False, refresh=None):
self.default = default
self.label = label
self.component = component
self.component_args = component_args
self.onchange = onchange
self.section = None
- self.show_on_main_page = show_on_main_page
+ self.refresh = refresh
def options_section(section_identifier, options_dict):
@@ -175,6 +177,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
     "grid_format": OptionInfo('png', 'File format for grids'),
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
+ "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
@@ -237,8 +240,9 @@ options_templates.update(options_section(('training', "Training"), {
 }))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
- "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, show_on_main_page=True),
- "sd_hypernetwork": OptionInfo("None", "Stable Diffusion finetune hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}),
+ "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, refresh=sd_models.list_models),
+ "sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
+ "sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
@@ -250,14 +254,17 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "filter_nsfw": OptionInfo(False, "Filter NSFW content"),
'CLIP_stop_at_last_layers': OptionInfo(1, "Stop At last layers of CLIP model", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
+ 'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"),
}))
options_templates.update(options_section(('interrogate', "Interrogate Options"), {
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
+ "interrogate_return_ranks": OptionInfo(False, "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators)."),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
+ "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file (0 = No limit)"),
"interrogate_deepbooru_score_threshold": OptionInfo(0.5, "Interrogate: deepbooru score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"deepbooru_sort_alpha": OptionInfo(True, "Interrogate: deepbooru sort alphabetically"),
"deepbooru_use_spaces": OptionInfo(False, "use spaces for tags in deepbooru"),
@@ -345,6 +352,8 @@ class Options:
         item = self.data_labels.get(key)
item.onchange = func
+ func()
+
def dumpjson(self):
d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()}
return json.dumps(d)
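The reworked --use-cpu handling above assigns one device per module via a generator expression: a module lands on the CPU when either its own (now lower-cased) name or 'all' appears in the flag's value. A standalone sketch of the same selection logic, using device strings instead of torch devices:

    MODULES = ['sd', 'interrogate', 'gfpgan', 'bsrgan', 'esrgan', 'scunet', 'codeformer']

    def resolve_devices(use_cpu):
        # 'all' forces every module to the CPU; otherwise only named modules move.
        return {x: ('cpu' if any(y in use_cpu for y in [x, 'all']) else 'gpu')
                for x in MODULES}

    print(resolve_devices(['interrogate']))  # only the interrogator on CPU
    print(resolve_devices(['all']))          # everything on CPU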
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index 3047bede..886cf0c3 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -17,7 +17,9 @@ def preprocess(process_src, process_dst, process_width, process_height, process_
         shared.interrogator.load()
if process_caption_deepbooru:
- deepbooru.create_deepbooru_process(opts.interrogate_deepbooru_score_threshold, deepbooru.create_deepbooru_opts())
+ db_opts = deepbooru.create_deepbooru_opts()
+ db_opts[deepbooru.OPT_INCLUDE_RANKS] = False
+ deepbooru.create_deepbooru_process(opts.interrogate_deepbooru_score_threshold, db_opts)
preprocess_work(process_src, process_dst, process_width, process_height, process_flip, process_split, process_caption, process_caption_deepbooru)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index b83df079..1f5ace6f 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -173,7 +173,7 @@ def create_embedding(name, num_vectors_per_token, init_text='*'):
     return fn
-def train_embedding(embedding_name, learn_rate, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, write_csv_every, template_file, save_image_with_stored_embedding, preview_image_prompt):
+def train_embedding(embedding_name, learn_rate, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
assert embedding_name, 'embedding not selected'
shared.state.textinfo = "Initializing textual inversion training..."
@@ -275,18 +275,29 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, traini
         if embedding.step > 0 and images_dir is not None and embedding.step % create_image_every == 0:
last_saved_image = os.path.join(images_dir, f'{embedding_name}-{embedding.step}.png')
- preview_text = entry.cond_text if preview_image_prompt == "" else preview_image_prompt
-
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
- prompt=preview_text,
- steps=20,
- height=training_height,
- width=training_width,
do_not_save_grid=True,
do_not_save_samples=True,
)
+ if preview_from_txt2img:
+ p.prompt = preview_prompt
+ p.negative_prompt = preview_negative_prompt
+ p.steps = preview_steps
+ p.sampler_index = preview_sampler_index
+ p.cfg_scale = preview_cfg_scale
+ p.seed = preview_seed
+ p.width = preview_width
+ p.height = preview_height
+ else:
+ p.prompt = entry.cond_text
+ p.steps = 20
+ p.width = training_width
+ p.height = training_height
+
+ preview_text = p.prompt
+
processed = processing.process_images(p)
image = processed.images[0]
diff --git a/modules/txt2img.py b/modules/txt2img.py
index e985242b..2381347f 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -6,7 +6,7 @@ import modules.processing as processing
 from modules.ui import plaintext_to_html
-def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, scale_latent: bool, denoising_strength: float, *args):
+def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, firstphase_width: int, firstphase_height: int, *args):
p = StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
@@ -30,8 +30,9 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
restore_faces=restore_faces,
tiling=tiling,
enable_hr=enable_hr,
- scale_latent=scale_latent if enable_hr else None,
denoising_strength=denoising_strength if enable_hr else None,
+ firstphase_width=firstphase_width if enable_hr else None,
+ firstphase_height=firstphase_height if enable_hr else None,
)
if cmd_opts.enable_console_prompts:
diff --git a/modules/ui.py b/modules/ui.py
index 1195c2f1..be4a43a7 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -22,7 +22,7 @@ import gradio as gr
import gradio.utils
import gradio.routes
-from modules import sd_hijack
+from modules import sd_hijack, sd_models
from modules.paths import script_path
from modules.shared import opts, cmd_opts
if cmd_opts.deepdanbooru:
@@ -40,6 +40,7 @@ from modules import prompt_parser
from modules.images import save_image
import modules.textual_inversion.ui
import modules.hypernetworks.ui
+import modules.images_history as img_his
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
@@ -78,6 +79,8 @@ reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
folder_symbol = '\U0001f4c2' # 📂
+refresh_symbol = '\U0001f504' # 🔄
+
def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
@@ -431,7 +434,6 @@ def create_toprow(is_img2img):
with gr.Column(scale=80):
with gr.Row():
prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, placeholder="Prompt", lines=2)
-
with gr.Column(scale=1, elem_id="roll_col"):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
@@ -506,13 +508,40 @@ def setup_progressbar(progressbar, preview, id_part, textinfo=None):
)
+def apply_setting(key, value):
+ if value is None:
+ return gr.update()
+
+ if key == "sd_model_checkpoint":
+ ckpt_info = sd_models.get_closet_checkpoint_match(value)
+
+ if ckpt_info is not None:
+ value = ckpt_info.title
+ else:
+ return gr.update()
+
+ comp_args = opts.data_labels[key].component_args
+ if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
+ return
+
+ valtype = type(opts.data_labels[key].default)
+ oldval = opts.data[key]
+ opts.data[key] = valtype(value) if valtype != type(None) else value
+ if oldval != value and opts.data_labels[key].onchange is not None:
+ opts.data_labels[key].onchange()
+
+ opts.save(shared.config_filename)
+ return value
+
+
def create_ui(wrap_gradio_gpu_call):
import modules.img2img
import modules.txt2img
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
- txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
+ txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
+ txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="bytes", visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
with gr.Column(scale=1):
@@ -538,10 +567,11 @@ def create_ui(wrap_gradio_gpu_call):
enable_hr = gr.Checkbox(label='Highres. fix', value=False)
with gr.Row(visible=False) as hr_options:
- scale_latent = gr.Checkbox(label='Scale latent', value=False)
+ firstphase_width = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass width", value=512)
+ firstphase_height = gr.Slider(minimum=64, maximum=1024, step=64, label="First pass height", value=512)
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7)
- with gr.Row():
+ with gr.Row(equal_height=True):
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1)
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1)
@@ -600,8 +630,9 @@ def create_ui(wrap_gradio_gpu_call):
height,
width,
enable_hr,
- scale_latent,
denoising_strength,
+ firstphase_width,
+ firstphase_height,
] + custom_inputs,
outputs=[
txt2img_gallery,
@@ -614,6 +645,17 @@ def create_ui(wrap_gradio_gpu_call):
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)
+ txt_prompt_img.change(
+ fn=modules.images.image_data,
+ inputs=[
+ txt_prompt_img
+ ],
+ outputs=[
+ txt2img_prompt,
+ txt_prompt_img
+ ]
+ )
+
enable_hr.change(
fn=lambda x: gr_show(x),
inputs=[enable_hr],
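The txt_prompt_img.change wiring above passes a dropped file to modules.images.image_data and routes the result into the prompt box. A minimal sketch of such a handler, assuming (not shown in this diff) that the webui stores generation parameters in the PNG's "parameters" text chunk; image_data_sketch is a hypothetical name:

    import io
    from PIL import Image

    def image_data_sketch(data: bytes):
        try:
            image = Image.open(io.BytesIO(data))
            # PNG tEXt chunks land in image.info; return text for the prompt
            # box and None to reset the hidden file component
            return image.info.get("parameters", ""), None
        except Exception:
            return "", None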
@@ -666,14 +708,29 @@ def create_ui(wrap_gradio_gpu_call):
(denoising_strength, "Denoising strength"),
(enable_hr, lambda d: "Denoising strength" in d),
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
+ (firstphase_width, "First pass size-1"),
+ (firstphase_height, "First pass size-2"),
+ ]
+
+ txt2img_preview_params = [
+ txt2img_prompt,
+ txt2img_negative_prompt,
+ steps,
+ sampler_index,
+ cfg_scale,
+ seed,
+ width,
+ height,
]
- modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
+
token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as img2img_interface:
- img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
+ img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, token_counter, token_button = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
+ img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="bytes", visible=False)
+
with gr.Column(scale=1):
pass
@@ -768,6 +825,17 @@ def create_ui(wrap_gradio_gpu_call):
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
+ img2img_prompt_img.change(
+ fn=modules.images.image_data,
+ inputs=[
+ img2img_prompt_img
+ ],
+ outputs=[
+ img2img_prompt,
+ img2img_prompt_img
+ ]
+ )
+
mask_mode.change(
lambda mode, img: {
init_img_with_mask: gr_show(mode == 0),
@@ -908,7 +976,6 @@ def create_ui(wrap_gradio_gpu_call):
(seed_resize_from_h, "Seed resize from-2"),
(denoising_strength, "Denoising strength"),
]
- modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as extras_interface:
@@ -956,6 +1023,7 @@ def create_ui(wrap_gradio_gpu_call):
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else ''
open_extras_folder = gr.Button('Open output directory', elem_id=button_id)
+
submit.click(
fn=wrap_gradio_gpu_call(modules.extras.run_extras),
_js="get_extras_tab_index",
@@ -1015,6 +1083,13 @@ def create_ui(wrap_gradio_gpu_call):
inputs=[image],
outputs=[html, generation_info, html2],
)
+ # images history
+ images_history_switch_dict = {
+ "fn":modules.generation_parameters_copypaste.connect_paste,
+ "t2i":txt2img_paste_fields,
+ "i2i":img2img_paste_fields
+ }
+ images_history = img_his.create_history_tabs(gr, opts, wrap_gradio_call(modules.extras.run_pnginfo), images_history_switch_dict)
with gr.Blocks() as modelmerger_interface:
with gr.Row().style(equal_height=False):
@@ -1022,11 +1097,12 @@ def create_ui(wrap_gradio_gpu_call):
gr.HTML(value="<p>A merger of the two checkpoints will be generated in your <b>checkpoint</b> directory.</p>")
with gr.Row():
- primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary Model Name")
- secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
+ primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)")
+ secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)")
+ tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)")
custom_name = gr.Textbox(label="Custom Name (Optional)")
- interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
- interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
+ interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3)
+ interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method")
save_as_half = gr.Checkbox(value=False, label="Save as float16")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
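As the new labels imply, the merge interpolates per tensor between models A, B and (for one mode) C, weighted by multiplier M. A hedged sketch of the two modes; the real implementation lives in modules/extras.py, which is not shown in this hunk:

    def merge_tensor(a, b, c, m, method="Weighted sum"):
        if method == "Weighted sum":
            return a * (1 - m) + b * m   # M = 0 gives model A unchanged
        return a + (b - c) * m           # "Add difference" uses tertiary model C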
@@ -1099,7 +1175,7 @@ def create_ui(wrap_gradio_gpu_call):
write_csv_every = gr.Number(label='Save a csv containing the loss to log directory every N steps, 0 to disable', value=500, precision=0)
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True)
- preview_image_prompt = gr.Textbox(label='Preview prompt', value="")
+ preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False)
with gr.Row():
interrupt_training = gr.Button(value="Interrupt")
@@ -1178,7 +1254,8 @@ def create_ui(wrap_gradio_gpu_call):
write_csv_every,
template_file,
save_image_with_stored_embedding,
- preview_image_prompt,
+ preview_from_txt2img,
+ *txt2img_preview_params,
],
outputs=[
ti_output,
@@ -1199,7 +1276,8 @@ def create_ui(wrap_gradio_gpu_call):
save_embedding_every,
write_csv_every,
template_file,
- preview_image_prompt,
+ preview_from_txt2img,
+ *txt2img_preview_params,
],
outputs=[
ti_output,
@@ -1213,8 +1291,7 @@ def create_ui(wrap_gradio_gpu_call):
outputs=[],
)
-
- def create_setting_component(key):
+ def create_setting_component(key, is_quicksettings=False):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
@@ -1234,7 +1311,34 @@ def create_ui(wrap_gradio_gpu_call):
else:
raise Exception(f'bad options item type: {str(t)} for key {key}')
- return comp(label=info.label, value=fun, **(args or {}))
+ if info.refresh is not None:
+ if is_quicksettings:
+ res = comp(label=info.label, value=fun, **(args or {}))
+ refresh_button = gr.Button(value=refresh_symbol, elem_id="refresh_"+key)
+ else:
+ with gr.Row(variant="compact"):
+ res = comp(label=info.label, value=fun, **(args or {}))
+ refresh_button = gr.Button(value=refresh_symbol, elem_id="refresh_" + key)
+
+ def refresh():
+ info.refresh()
+ refreshed_args = info.component_args() if callable(info.component_args) else info.component_args
+
+ for k, v in refreshed_args.items():
+ setattr(res, k, v)
+
+ return gr.update(**(refreshed_args or {}))
+
+ refresh_button.click(
+ fn=refresh,
+ inputs=[],
+ outputs=[res],
+ )
+ else:
+ res = comp(label=info.label, value=fun, **(args or {}))
+
+
+ return res
components = []
component_dict = {}
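A standalone sketch of the refresh-button pattern introduced in create_setting_component above, with hypothetical names: the click handler re-reads the available choices and returns gr.update() so Gradio patches the component in place rather than rebuilding the UI.

    import gradio as gr

    def list_checkpoints():  # hypothetical stand-in for info.refresh()
        return ["model-a.ckpt", "model-b.ckpt"]

    with gr.Blocks() as demo:
        dropdown = gr.Dropdown(choices=list_checkpoints(), label="Checkpoint")
        refresh = gr.Button("\U0001f504")  # 🔄
        refresh.click(fn=lambda: gr.update(choices=list_checkpoints()),
                      inputs=[], outputs=[dropdown])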
@@ -1308,6 +1412,9 @@ Requested path was: {f}
settings_cols = 3
items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
+ quicksettings_names = [x.strip() for x in opts.quicksettings.split(",")]
+ quicksettings_names = set(x for x in quicksettings_names if x != 'quicksettings')
+
quicksettings_list = []
cols_displayed = 0
@@ -1332,7 +1439,7 @@ Requested path was: {f}
gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
- if item.show_on_main_page:
+ if k in quicksettings_names:
quicksettings_list.append((i, k, item))
components.append(dummy_component)
else:
@@ -1341,7 +1448,11 @@ Requested path was: {f}
components.append(component)
items_displayed += 1
- request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
+ with gr.Row():
+ request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
+ reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
+ restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
+
request_notifications.click(
fn=lambda: None,
inputs=[],
@@ -1349,10 +1460,6 @@ Requested path was: {f}
_js='function(){}'
)
- with gr.Row():
- reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
- restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
-
def reload_scripts():
modules.scripts.reload_script_body_only()
@@ -1367,7 +1474,6 @@ Requested path was: {f}
shared.state.interrupt()
settings_interface.gradio_ref.do_restart = True
-
restart_gradio.click(
fn=request_restart,
inputs=[],
@@ -1383,6 +1489,7 @@ Requested path was: {f}
(img2img_interface, "img2img", "img2img"),
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
+ (images_history, "History", "images_history"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
(train_interface, "Train", "ti"),
(settings_interface, "Settings", "settings"),
@@ -1402,12 +1509,12 @@ Requested path was: {f}
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Row(elem_id="quicksettings"):
for i, k, item in quicksettings_list:
- component = create_setting_component(k)
+ component = create_setting_component(k, is_quicksettings=True)
component_dict[k] = component
settings_interface.gradio_ref = demo
- with gr.Tabs() as tabs:
+ with gr.Tabs(elem_id="tabs") as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid, elem_id='tab_' + ifid):
interface.render()
@@ -1446,6 +1553,7 @@ Requested path was: {f}
inputs=[
primary_model_name,
secondary_model_name,
+ tertiary_model_name,
interp_method,
interp_amount,
save_as_half,
@@ -1455,6 +1563,7 @@ Requested path was: {f}
submit_result,
primary_model_name,
secondary_model_name,
+ tertiary_model_name,
component_dict['sd_model_checkpoint'],
]
)
@@ -1521,8 +1630,22 @@ Requested path was: {f}
outputs=[extras_image],
)
- modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields, generation_info, 'switch_to_txt2img')
- modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields, generation_info, 'switch_to_img2img_img2img')
+ settings_map = {
+ 'sd_hypernetwork': 'Hypernet',
+ 'CLIP_stop_at_last_layers': 'Clip skip',
+ 'sd_model_checkpoint': 'Model hash',
+ }
+
+ settings_paste_fields = [
+ (component_dict[k], lambda d, k=k, v=v: apply_setting(k, d.get(v, None)))
+ for k, v in settings_map.items()
+ ]
+
+ modules.generation_parameters_copypaste.connect_paste(txt2img_paste, txt2img_paste_fields + settings_paste_fields, txt2img_prompt)
+ modules.generation_parameters_copypaste.connect_paste(img2img_paste, img2img_paste_fields + settings_paste_fields, img2img_prompt)
+
+ modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields + settings_paste_fields, generation_info, 'switch_to_txt2img')
+ modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields + settings_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
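The `lambda d, k=k, v=v` in settings_paste_fields above captures the loop variables as default arguments because Python closures bind names late; without it, every lambda in the comprehension would see the final loop values. A minimal demonstration:

    late = [lambda: i for i in range(3)]
    early = [lambda i=i: i for i in range(3)]
    print([f() for f in late])   # [2, 2, 2]
    print([f() for f in early])  # [0, 1, 2]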
@@ -1604,3 +1727,4 @@ if 'gradio_routes_templates_response' not in globals():
gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
gradio.routes.templates.TemplateResponse = template_response
+
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index 313a55d2..d438175c 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -120,15 +120,45 @@ class Script(scripts.Script):
return is_img2img
def ui(self, is_img2img):
+ info = gr.Markdown('''
+ * `CFG Scale` should be 2 or lower.
+ ''')
+
+ override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler? (this method is built for it)", value=True)
+
+ override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`? (and `negative prompt`)", value=True)
original_prompt = gr.Textbox(label="Original prompt", lines=1)
original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1)
- cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0)
+
+ override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True)
st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50)
+
+ override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True)
+
+ cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0)
randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0)
sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False)
- return [original_prompt, original_negative_prompt, cfg, st, randomness, sigma_adjustment]
- def run(self, p, original_prompt, original_negative_prompt, cfg, st, randomness, sigma_adjustment):
+ return [
+ info,
+ override_sampler,
+ override_prompt, original_prompt, original_negative_prompt,
+ override_steps, st,
+ override_strength,
+ cfg, randomness, sigma_adjustment,
+ ]
+
+ def run(self, p, _, override_sampler, override_prompt, original_prompt, original_negative_prompt, override_steps, st, override_strength, cfg, randomness, sigma_adjustment):
+ # Override
+ if override_sampler:
+ p.sampler_index = [sampler.name for sampler in sd_samplers.samplers].index("Euler")
+ if override_prompt:
+ p.prompt = original_prompt
+ p.negative_prompt = original_negative_prompt
+ if override_steps:
+ p.steps = st
+ if override_strength:
+ p.denoising_strength = 1.0
def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 3bb080bf..8c7da6bb 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -107,6 +107,10 @@ def apply_hypernetwork(p, x, xs):
hypernetwork.load_hypernetwork(name)
+def apply_hypernetwork_strength(p, x, xs):
+ hypernetwork.apply_strength(x)
+
+
def confirm_hypernetworks(p, xs):
for x in xs:
if x.lower() in ["", "none"]:
@@ -165,23 +169,28 @@ axis_options = [
AxisOption("Sampler", str, apply_sampler, format_value, confirm_samplers),
AxisOption("Checkpoint name", str, apply_checkpoint, format_value, confirm_checkpoints),
AxisOption("Hypernetwork", str, apply_hypernetwork, format_value, confirm_hypernetworks),
+ AxisOption("Hypernet str.", float, apply_hypernetwork_strength, format_value_add_label, None),
AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label, None),
AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label, None),
AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label, None),
AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label, None),
AxisOption("Eta", float, apply_field("eta"), format_value_add_label, None),
AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label, None),
- AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None), # as it is now all AxisOptionImg2Img items must go after AxisOption ones
+ AxisOption("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None),
]
-def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
- res = []
-
+def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend, include_lone_images):
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
- first_processed = None
+ # Temporary list of all the images generated, to be assembled into the grid.
+ # Any cell that fails to process is filled with an empty image so the grid keeps its shape.
+ image_cache = []
+
+ processed_result = None
+ cell_mode = "P"
+ cell_size = (1,1)
state.job_count = len(xs) * len(ys) * p.n_iter
@@ -189,22 +198,39 @@ def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
for ix, x in enumerate(xs):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
- processed = cell(x, y)
- if first_processed is None:
- first_processed = processed
-
+ processed: Processed = cell(x, y)
try:
- res.append(processed.images[0])
+ # this dereference will throw an exception if the image was not processed
+ # (this happens in cases such as if the user stops the process from the UI)
+ processed_image = processed.images[0]
+
+ if processed_result is None:
+ # Use our first valid processed result as a template container to hold our full results
+ processed_result = copy(processed)
+ cell_mode = processed_image.mode
+ cell_size = processed_image.size
+ processed_result.images = [Image.new(cell_mode, cell_size)]
+
+ image_cache.append(processed_image)
+ if include_lone_images:
+ processed_result.images.append(processed_image)
+ processed_result.all_prompts.append(processed.prompt)
+ processed_result.all_seeds.append(processed.seed)
+ processed_result.infotexts.append(processed.infotexts[0])
except:
- res.append(Image.new(res[0].mode, res[0].size))
+ image_cache.append(Image.new(cell_mode, cell_size))
+
+ if not processed_result:
+ print("Unexpected error: draw_xy_grid failed to return even a single processed image")
+ return Processed()
- grid = images.image_grid(res, rows=len(ys))
+ grid = images.image_grid(image_cache, rows=len(ys))
if draw_legend:
- grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
+ grid = images.draw_grid_annotations(grid, cell_size[0], cell_size[1], hor_texts, ver_texts)
- first_processed.images = [grid]
+ processed_result.images[0] = grid
- return first_processed
+ return processed_result
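A standalone illustration (hypothetical names) of the fallback draw_xy_grid now uses: a cell that fails to render contributes a blank image of the established mode and size, so the grid stays rectangular even when the user interrupts mid-run.

    from PIL import Image

    def collect_cells(results, fallback_mode="P", fallback_size=(1, 1)):
        cells, mode, size = [], fallback_mode, fallback_size
        for r in results:
            try:
                img = r.images[0]          # raises if the cell never rendered
                mode, size = img.mode, img.size
                cells.append(img)
            except Exception:
                cells.append(Image.new(mode, size))  # blank cell keeps grid shape
        return cells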
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
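A worked example of the range syntax re_range accepts, assuming the surrounding code (not shown in this hunk) expands matches into inclusive ranges with an optional step:

    import re
    re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
    m = re_range.fullmatch("1-10 (+3)")
    start, end, step = int(m.group(1)), int(m.group(2)), int(m.group(3) or 1)
    print(list(range(start, end + 1, step)))  # [1, 4, 7, 10]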
@@ -229,11 +255,12 @@ class Script(scripts.Script):
y_values = gr.Textbox(label="Y values", visible=False, lines=1)
draw_legend = gr.Checkbox(label='Draw legend', value=True)
+ include_lone_images = gr.Checkbox(label='Include Separate Images', value=False)
no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False)
- return [x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds]
+ return [x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds]
- def run(self, p, x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds):
+ def run(self, p, x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds):
if not no_fixed_seeds:
modules.processing.fix_seed(p)
@@ -311,7 +338,7 @@ class Script(scripts.Script):
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
- if axis_opt.label == 'Seed':
+ if axis_opt.label in ['Seed','Var. seed']:
return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
else:
return axis_list
@@ -344,7 +371,8 @@ class Script(scripts.Script):
x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
cell=cell,
- draw_legend=draw_legend
+ draw_legend=draw_legend,
+ include_lone_images=include_lone_images
)
if opts.grid_save:
@@ -354,6 +382,8 @@ class Script(scripts.Script):
modules.sd_models.reload_model_weights(shared.sd_model)
hypernetwork.load_hypernetwork(opts.sd_hypernetwork)
+ hypernetwork.apply_strength()
+
opts.data["CLIP_stop_at_last_layers"] = CLIP_stop_at_last_layers
diff --git a/style.css b/style.css
@@ -167,14 +167,6 @@ button{
align-self: stretch !important;
}
-#prompt, #negative_prompt{
- border: none !important;
-}
-#prompt textarea, #negative_prompt textarea{
- border: none !important;
-}
-
-
#img2maskimg .h-60{
height: 30rem;
}
@@ -228,6 +220,8 @@ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block s
border-top: 1px solid #eee;
border-left: 1px solid #eee;
border-right: 1px solid #eee;
+
+ z-index: 300;
}
.dark fieldset span.text-gray-500, .dark .gr-block.gr-box span.text-gray-500, .dark label.block span{
@@ -480,16 +474,30 @@ input[type="range"]{
background: #a55000;
}
+#quicksettings {
+ gap: 0.4em;
+}
+
#quicksettings > div{
border: none;
background: none;
+ flex: unset;
+ gap: 0.5em;
}
#quicksettings > div > div{
max-width: 32em;
+ min-width: 24em;
padding: 0;
}
+#refresh_sd_model_checkpoint, #refresh_sd_hypernetwork{
+ max-width: 2.5em;
+ min-width: 2.5em;
+ height: 2.4em;
+}
+
+
canvas[key="mask"] {
z-index: 12 !important;
filter: invert();
@@ -506,3 +514,10 @@ canvas[key="mask"] {
z-index: 200;
width: 8em;
}
+#quicksettings .gr-box > div > div > input.gr-text-input {
+ top: -1.12em;
+}
+
+.row.gr-compact{
+ overflow: visible;
+}
diff --git a/webui.py b/webui.py
@@ -72,7 +72,6 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):
return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs)
-
def initialize():
modelloader.cleanup_models()
modules.sd_models.setup_model()
@@ -86,6 +85,7 @@ def initialize():
shared.sd_model = modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
+ shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)
def webui():
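A hypothetical sketch of the onchange contract this hook relies on: shared.opts keeps one callback per setting key and fires it whenever that setting is written, so changing sd_hypernetwork_strength takes effect immediately without a model reload.

    class OptionsSketch:  # hypothetical simplification of shared.opts
        def __init__(self):
            self.data, self._handlers = {}, {}

        def onchange(self, key, func):
            self._handlers[key] = func

        def set(self, key, value):
            self.data[key] = value
            if key in self._handlers:
                self._handlers[key]()  # e.g. hypernetwork.apply_strength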