From 1742c04bab5ff88b53dee60cfb9b90076dd98512 Mon Sep 17 00:00:00 2001 From: rucadi Date: Fri, 16 Dec 2022 17:10:13 +0100 Subject: Add polling callback --- modules/script_callbacks.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'modules') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 8e22f875..715e1830 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -63,6 +63,7 @@ callback_map = dict( callbacks_cfg_denoiser=[], callbacks_before_component=[], callbacks_after_component=[], + callbacks_on_polling=[], ) @@ -78,6 +79,12 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI): except Exception: report_exception(c, 'app_started_callback') +def app_polling_callback(demo: Optional[Blocks], app: FastAPI): + for c in callback_map['callbacks_on_polling']: + try: + c.callback() + except Exception: + report_exception(c, 'callbacks_on_polling') def model_loaded_callback(sd_model): for c in callback_map['callbacks_model_loaded']: @@ -184,6 +191,11 @@ def on_app_started(callback): add_callback(callback_map['callbacks_app_started'], callback) +def on_polling(callback): + """register a function to be called on each polling of the server.""" + add_callback(callback_map['callbacks_on_polling'], callback) + + def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the model is passed as an argument""" -- cgit v1.2.1 From 0c8825b2bec3a68836eacf37718306c9c78554a0 Mon Sep 17 00:00:00 2001 From: rucadi Date: Fri, 16 Dec 2022 18:31:20 +0100 Subject: Add a callback called before reloading the server --- modules/script_callbacks.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 715e1830..b646b0f9 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -64,6 +64,7 @@ callback_map = dict( callbacks_before_component=[], callbacks_after_component=[], callbacks_on_polling=[], + callbacks_on_reload=[], ) @@ -71,7 +72,6 @@ def clear_callbacks(): for callback_list in callback_map.values(): callback_list.clear() - def app_started_callback(demo: Optional[Blocks], app: FastAPI): for c in callback_map['callbacks_app_started']: try: @@ -86,6 +86,14 @@ def app_polling_callback(demo: Optional[Blocks], app: FastAPI): except Exception: report_exception(c, 'callbacks_on_polling') +def app_reload_callback(demo: Optional[Blocks], app: FastAPI): + for c in callback_map['callbacks_on_reload']: + try: + c.callback() + except Exception: + report_exception(c, 'callbacks_on_reload') + + def model_loaded_callback(sd_model): for c in callback_map['callbacks_model_loaded']: try: @@ -195,6 +203,9 @@ def on_polling(callback): """register a function to be called on each polling of the server.""" add_callback(callback_map['callbacks_on_polling'], callback) +def on_before_reload(callback): + """register a function to be called just before the server reloads.""" + add_callback(callback_map['callbacks_on_reload'], callback) def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the model is -- cgit v1.2.1 From 3662a274e2b6482c4ad831cc2d7976d919b40212 Mon Sep 17 00:00:00 2001 From: rucadi Date: Fri, 16 Dec 2022 17:10:13 +0100 Subject: Add polling callback --- modules/script_callbacks.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'modules') diff --git a/modules/script_callbacks.py 
b/modules/script_callbacks.py index 4bb45ec7..7763936f 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -74,6 +74,7 @@ callback_map = dict( callbacks_infotext_pasted=[], callbacks_script_unloaded=[], callbacks_before_ui=[], + callbacks_on_polling=[], ) @@ -89,6 +90,12 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI): except Exception: report_exception(c, 'app_started_callback') +def app_polling_callback(demo: Optional[Blocks], app: FastAPI): + for c in callback_map['callbacks_on_polling']: + try: + c.callback() + except Exception: + report_exception(c, 'callbacks_on_polling') def model_loaded_callback(sd_model): for c in callback_map['callbacks_model_loaded']: @@ -227,6 +234,11 @@ def on_app_started(callback): add_callback(callback_map['callbacks_app_started'], callback) +def on_polling(callback): + """register a function to be called on each polling of the server.""" + add_callback(callback_map['callbacks_on_polling'], callback) + + def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the model is passed as an argument; this function is also called when the script is reloaded. """ -- cgit v1.2.1 From eb5eb8aa117c3d9ff9c55c59bc589ad8e983919e Mon Sep 17 00:00:00 2001 From: rucadi Date: Fri, 16 Dec 2022 18:31:20 +0100 Subject: Add a callback called before reloading the server --- modules/script_callbacks.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 7763936f..91fd21f4 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -75,6 +75,7 @@ callback_map = dict( callbacks_script_unloaded=[], callbacks_before_ui=[], callbacks_on_polling=[], + callbacks_on_reload=[], ) @@ -82,7 +83,6 @@ def clear_callbacks(): for callback_list in callback_map.values(): callback_list.clear() - def app_started_callback(demo: Optional[Blocks], app: FastAPI): for c in callback_map['callbacks_app_started']: try: @@ -97,6 +97,14 @@ def app_polling_callback(demo: Optional[Blocks], app: FastAPI): except Exception: report_exception(c, 'callbacks_on_polling') +def app_reload_callback(demo: Optional[Blocks], app: FastAPI): + for c in callback_map['callbacks_on_reload']: + try: + c.callback() + except Exception: + report_exception(c, 'callbacks_on_reload') + + def model_loaded_callback(sd_model): for c in callback_map['callbacks_model_loaded']: try: @@ -238,6 +246,9 @@ def on_polling(callback): """register a function to be called on each polling of the server.""" add_callback(callback_map['callbacks_on_polling'], callback) +def on_before_reload(callback): + """register a function to be called just before the server reloads.""" + add_callback(callback_map['callbacks_on_reload'], callback) def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the model is -- cgit v1.2.1 From a80d7d090ce19d9b275f6b0d6b8dbbf61a1992e0 Mon Sep 17 00:00:00 2001 From: Rucadi Date: Tue, 21 Mar 2023 18:47:05 +0100 Subject: Update script_callbacks.py --- modules/script_callbacks.py | 1 - 1 file changed, 1 deletion(-) (limited to 'modules') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index f758a1ee..8e80dd8d 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -75,7 +75,6 @@ callback_map = dict( callbacks_script_unloaded=[], callbacks_before_ui=[], callbacks_on_reload=[], - callbacks_on_reload=[], 
callbacks_on_polling=[], ) -- cgit v1.2.1 From d86beb822832c9162714cf0a3567ad087839a2ac Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Thu, 23 Mar 2023 17:09:59 -0400 Subject: Remove "do not add watermark to images" option --- modules/shared.py | 1 - 1 file changed, 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index f28a12cc..030cd7e0 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -342,7 +342,6 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), - "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), -- cgit v1.2.1 From b2fc7dba2edead2b2e880ea90bd6b5494115b330 Mon Sep 17 00:00:00 2001 From: kurilee Date: Sat, 25 Mar 2023 22:45:41 +0800 Subject: Add option "keep original size" to textual inversion images preprocess --- modules/textual_inversion/preprocess.py | 10 +++++++--- modules/ui.py | 2 ++ 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 2239cb84..9ad1d3f4 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -11,7 +11,7 @@ from modules.shared import opts, cmd_opts from modules.textual_inversion import autocrop -def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): +def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): try: if process_caption: shared.interrogator.load() @@ -19,7 +19,7 @@ def preprocess(id_task, process_src, process_dst, process_width, process_height, if process_caption_deepbooru: deepbooru.model.start() - preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, 
process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold) + preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold) finally: @@ -131,7 +131,7 @@ def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, thr return wh and center_crop(image, *wh) -def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): +def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): width = process_width height = process_height src = os.path.abspath(process_src) @@ -223,6 +223,10 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre print(f"skipped {img.width}x{img.height} image {filename} (can't find suitable size within error threshold)") process_default_resize = False + if process_keep_original_size: + save_pic(img, index, params, existing_caption=existing_caption) + process_default_resize = False + if process_default_resize: img = images.resize_image(1, img, width, height) save_pic(img, index, params, existing_caption=existing_caption) diff --git a/modules/ui.py b/modules/ui.py index af8546c2..974f2a30 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1098,6 +1098,7 @@ def create_ui(): preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") with gr.Row(): + process_keep_original_size = gr.Checkbox(label='Keep original size', elem_id="train_process_keep_original_size") process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") process_focal_crop = 
gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") @@ -1264,6 +1265,7 @@ def create_ui(): process_width, process_height, preprocess_txt_action, + process_keep_original_size, process_flip, process_split, process_caption, -- cgit v1.2.1 From c9647c8d23efa8c939c6af39878784e246082122 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sat, 25 Mar 2023 16:11:41 -0400 Subject: Support Gradio's theme API --- modules/shared.py | 35 +++++++++++++++++++++++++++++++++++ modules/ui.py | 2 +- 2 files changed, 36 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 11be3985..2f7892cd 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -4,6 +4,7 @@ import json import os import sys import time +import requests from PIL import Image import gradio as gr @@ -54,6 +55,21 @@ ui_reorder_categories = [ "scripts", ] +# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json +gradio_hf_hub_themes = [ + "gradio/glass", + "gradio/monochrome", + "gradio/seafoam", + "gradio/soft", + "freddyaboulton/dracula_revamped", + "gradio/dracula_test", + "abidlabs/dracula_test", + "abidlabs/pakistan", + "dawood/microsoft_windows", + "ysharma/steampunk" +] + + cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ @@ -387,6 +403,7 @@ options_templates.update(options_section(('ui', "User interface"), { "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), + "gradio_theme": OptionInfo("Default", "Gradio theme (requires restart)", gr.Dropdown, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}) })) options_templates.update(options_section(('ui', "Live previews"), { @@ -599,6 +616,24 @@ clip_model = None progress_print_out = sys.stdout +gradio_theme = gr.themes.Base() + + +def reload_gradio_theme(theme_name=None): + global gradio_theme + if not theme_name: + theme_name = opts.gradio_theme + + if theme_name == "Default": + gradio_theme = gr.themes.Default() + else: + try: + gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) + except requests.exceptions.ConnectionError: + print("Can't access HuggingFace Hub, falling back to default Gradio theme") + gradio_theme = gr.themes.Default() + + class TotalTQDM: def __init__(self): diff --git a/modules/ui.py b/modules/ui.py index af8546c2..6e049881 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1592,7 +1592,7 @@ def create_ui(): for _interface, label, _ifid in interfaces: shared.tab_names.append(label) - with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo: + with gr.Blocks(css=css, theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: with gr.Row(elem_id="quicksettings", variant="compact"): for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): component = create_setting_component(k, is_quicksettings=True) -- cgit v1.2.1 From 
d667fc435f6210575ba50a6f3a05d3853b233caa Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 28 Mar 2023 22:23:40 +0300 Subject: add "resize by" and "resize to" tabs to img2img --- modules/img2img.py | 8 +++++++- modules/ui.py | 48 +++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 52 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/img2img.py b/modules/img2img.py index 953ac5d2..d54728b7 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): processed_image.save(os.path.join(output_dir, filename)) -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): +def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -114,6 +114,12 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s if image is not None: image = ImageOps.exif_transpose(image) + if selected_scale_tab == 1: + assert image, "Can't scale by because no image is selected" + + width = int(image.width * scale_by) + height = int(image.height * scale_by) + assert 0. 
<= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]' p = StableDiffusionProcessingImg2Img( diff --git a/modules/ui.py b/modules/ui.py index eb5fcd3f..653eb665 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -127,6 +127,16 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}" +def resize_from_to_html(width, height, scale_by): + target_width = int(width * scale_by) + target_height = int(height * scale_by) + + if not target_width or not target_height: + return "no image selected" + + return f"resize: from {width}x{height} to {target_width}x{target_height}" + + def apply_styles(prompt, prompt_neg, styles): prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles) prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles) @@ -673,6 +683,8 @@ def create_ui(): copy_image_buttons.append((button, name, elem)) with gr.Tabs(elem_id="mode_img2img"): + img2img_selected_tab = gr.State(0) + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img: init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=480) add_copy_image_controls('img2img', init_img) @@ -715,6 +727,12 @@ def create_ui(): img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] + img2img_image_inputs = [init_img, sketch, init_img_with_mask, inpaint_color_sketch] + + for i, tab in enumerate(img2img_tabs): + tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) + def copy_image(img): if isinstance(img, dict) and 'image' in img: return img['image'] @@ -744,8 +762,30 @@ def create_ui(): elif category == "dimensions": with FormRow(): with gr.Column(elem_id="img2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + selected_scale_tab = gr.State(value=0) + + with gr.Tabs(): + with gr.Tab(label="Resize to") as tab_scale_to: + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + + with gr.Tab(label="Resize by") as tab_scale_by: + scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") + + with FormRow(): + scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") + gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + + scale_by.change( + fn=resize_from_to_html, + _js="currentImg2imgSourceResolution", + inputs=[dummy_component, dummy_component, scale_by], + outputs=scale_by_html, + show_progress=False, + ) + + tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) + tab_scale_by.select(fn=lambda: 1, inputs=[], 
outputs=[selected_scale_tab]) with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") @@ -806,7 +846,7 @@ def create_ui(): def select_img2img_tab(tab): return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3), - for i, elem in enumerate([tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]): + for i, elem in enumerate(img2img_tabs): elem.select( fn=lambda tab=i: select_img2img_tab(tab), inputs=[], @@ -859,8 +899,10 @@ def create_ui(): denoising_strength, seed, subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox, + selected_scale_tab, height, width, + scale_by, resize_mode, inpaint_full_res, inpaint_full_res_padding, -- cgit v1.2.1 From 42082e8a3239c1c32cd9e2a03a20b610af857b51 Mon Sep 17 00:00:00 2001 From: devdn Date: Tue, 28 Mar 2023 18:18:28 -0400 Subject: performance increase --- modules/processing.py | 4 +++- modules/sd_samplers_kdiffusion.py | 22 +++++++++++++++++----- modules/shared.py | 1 + 3 files changed, 21 insertions(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..9f00ce3c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -105,7 +105,7 @@ class StableDiffusionProcessing: """ The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing """ - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): 
if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -140,6 +140,7 @@ class StableDiffusionProcessing: self.denoising_strength: float = denoising_strength self.sampler_noise_scheduler_override = None self.ddim_discretize = ddim_discretize or opts.ddim_discretize + self.s_min_uncond = s_min_uncond or opts.s_min_uncond self.s_churn = s_churn or opts.s_churn self.s_tmin = s_tmin or opts.s_tmin self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option @@ -162,6 +163,7 @@ class StableDiffusionProcessing: self.all_seeds = None self.all_subseeds = None self.iteration = 0 + @property def sd_model(self): diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index e9f08518..6a54ce32 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -76,7 +76,7 @@ class CFGDenoiser(torch.nn.Module): return denoised - def forward(self, x, sigma, uncond, cond, cond_scale, image_cond): + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException @@ -116,6 +116,12 @@ class CFGDenoiser(torch.nn.Module): tensor = denoiser_params.text_cond uncond = denoiser_params.text_uncond + sigma_thresh = s_min_uncond + if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_thresh*sigma_thresh) and not is_edit_model): + uncond = torch.zeros([0,0,uncond.shape[2]]) + x_in=x_in[:x_in.shape[0]//2] + sigma_in=sigma_in[:sigma_in.shape[0]//2] + if tensor.shape[1] == uncond.shape[1]: if not is_edit_model: cond_in = torch.cat([tensor, uncond]) @@ -144,7 +150,8 @@ class CFGDenoiser(torch.nn.Module): x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) + if uncond.shape[0]: + x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) cfg_denoised_callback(denoised_params) @@ -157,7 +164,10 @@ class CFGDenoiser(torch.nn.Module): sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) if not is_edit_model: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + if uncond.shape[0]: + denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + else: + denoised = x_out else: denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) @@ -165,7 +175,6 @@ class CFGDenoiser(torch.nn.Module): denoised = self.init_latent * self.mask + self.nmask * denoised self.step += 1 - return denoised @@ -244,6 +253,7 @@ class KDiffusionSampler: self.model_wrap_cfg.step = 0 self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) self.eta = p.eta if p.eta is not None else opts.eta_ancestral + self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) @@ -326,6 +336,7 @@ class KDiffusionSampler: 'image_cond': image_conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond } samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, 
xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) @@ -359,7 +370,8 @@ class KDiffusionSampler: 'cond': conditioning, 'image_cond': image_conditioning, 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond }, disable=False, callback=self.callback_state, **extra_params_kwargs)) return samples diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..0bdd30b8 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -405,6 +405,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + 's_min_uncond': OptionInfo(0, "minimum sigma to use unconditioned guidance", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), -- cgit v1.2.1 From bc90592031d26d3a6ed5c1b65ee9801452b5ece5 Mon Sep 17 00:00:00 2001 From: devdn Date: Tue, 28 Mar 2023 20:59:31 -0400 Subject: increase range of negative guidance minimum sigma option --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 0bdd30b8..0e9f2d54 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -405,7 +405,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - 's_min_uncond': OptionInfo(0, "minimum sigma to use unconditioned guidance", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), + 's_min_uncond': OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), -- cgit v1.2.1 From 70a0a11783747d2e77a08a63d9feeceb7d2d4f63 Mon Sep 17 00:00:00 2001 From: Vespinian Date: Tue, 28 Mar 2023 23:52:51 -0400 Subject: Changed behavior that puts the args from alwayson_script request in the script_args, so don't accidently resize the arg list if we get less arg then or default list has --- modules/api/api.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 518b2a61..8c5cd185 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -272,7 +272,9 @@ class Api: raise HTTPException(status_code=422, detail=f"Cannot have a selectable script in 
the always on scripts params") # always on script with no arg should always run so you don't really need to add them to the requests if "args" in request.alwayson_scripts[alwayson_script_name]: - script_args[alwayson_script.args_from:alwayson_script.args_to] = request.alwayson_scripts[alwayson_script_name]["args"] + # min between arg length in scriptrunner and arg length in the request + for idx in range(0, min((alwayson_script.args_to - alwayson_script.args_from), len(request.alwayson_scripts[alwayson_script_name]["args"]))): + script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx] return script_args def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): -- cgit v1.2.1 From 67955ca9e5cb6b3cc539333d0a7d9591009bc800 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 13:04:02 -0500 Subject: Make selected tab configurable with UI config --- modules/ui.py | 30 ++++++++++++++++++++++++------ modules/ui_extensions.py | 6 +++--- modules/ui_extra_networks.py | 8 ++++---- modules/ui_postprocessing.py | 6 +++--- 4 files changed, 34 insertions(+), 16 deletions(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index 627fbe0b..3595b20d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -94,6 +94,9 @@ def send_gradio_gallery_to_image(x): def visit(x, func, path=""): if hasattr(x, 'children'): + if isinstance(x, gr.Tabs) and x.elem_id is not None: + # Tabs element can't have a label, have to use elem_id instead + func(f"{path}/Tabs@{x.elem_id}", x) for c in x.children: visit(c, func, path) elif x.label is not None: @@ -1048,7 +1051,7 @@ def create_ui(): with gr.Row(variant="compact").style(equal_height=False): with gr.Tabs(elem_id="train_tabs"): - with gr.Tab(label="Create embedding"): + with gr.Tab(label="Create embedding", id="create_embedding"): new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") @@ -1061,7 +1064,7 @@ def create_ui(): with gr.Column(): create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") - with gr.Tab(label="Create hypernetwork"): + with gr.Tab(label="Create hypernetwork", id="create_hypernetwork"): new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. 
ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") @@ -1079,7 +1082,7 @@ def create_ui(): with gr.Column(): create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") - with gr.Tab(label="Preprocess images"): + with gr.Tab(label="Preprocess images", id="preprocess_images"): process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") @@ -1146,7 +1149,7 @@ def create_ui(): def get_textual_inversion_template_names(): return sorted([x for x in textual_inversion.textual_inversion_templates]) - with gr.Tab(label="Train"): + with gr.Tab(label="Train", id="train"): gr.HTML(value="

Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]

") with FormRow(): train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) @@ -1479,7 +1482,7 @@ def create_ui(): current_row.__exit__() current_tab.__exit__() - with gr.TabItem("Actions"): + with gr.TabItem("Actions", id="actions"): request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications") download_localization = gr.Button(value='Download localization template', elem_id="download_localization") reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") @@ -1487,7 +1490,7 @@ def create_ui(): unload_sd_model = gr.Button(value='Unload SD checkpoint to free VRAM', elem_id="sett_unload_sd_model") reload_sd_model = gr.Button(value='Reload the last SD checkpoint back into VRAM', elem_id="sett_reload_sd_model") - with gr.TabItem("Licenses"): + with gr.TabItem("Licenses", id="licenses"): gr.HTML(shared.html("licenses.html"), elem_id="licenses") gr.Button(value="Show all pages", elem_id="settings_show_all_pages") @@ -1735,12 +1738,27 @@ def create_ui(): apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None)) + def check_tab_id(tab_id): + tab_items = list(filter(lambda e: isinstance(e, gr.TabItem), x.children)) + if type(tab_id) == str: + tab_ids = [t.id for t in tab_items] + return tab_id in tab_ids + elif type(tab_id) == int: + return tab_id >= 0 and tab_id < len(tab_items) + else: + return False + + if type(x) == gr.Tabs: + apply_field(x, 'selected', check_tab_id) + visit(txt2img_interface, loadsave, "txt2img") visit(img2img_interface, loadsave, "img2img") visit(extras_interface, loadsave, "extras") visit(modelmerger_interface, loadsave, "modelmerger") visit(train_interface, loadsave, "train") + loadsave(f"webui/Tabs@{tabs.elem_id}", tabs) + if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)): with open(ui_config_file, "w", encoding="utf8") as file: json.dump(ui_settings, file, indent=4) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index efd6cda2..5ec11f03 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -294,7 +294,7 @@ def create_ui(): with gr.Blocks(analytics_enabled=False) as ui: with gr.Tabs(elem_id="tabs_extensions") as tabs: - with gr.TabItem("Installed"): + with gr.TabItem("Installed", id="installed"): with gr.Row(elem_id="extensions_installed_top"): apply = gr.Button(value="Apply and restart UI", variant="primary") @@ -327,7 +327,7 @@ def create_ui(): outputs=[extensions_table, info], ) - with gr.TabItem("Available"): + with gr.TabItem("Available", id="available"): with gr.Row(): refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary") available_extensions_index = gr.Text(value="https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui-extensions/master/index.json", label="Extension index URL").style(container=False) @@ -374,7 +374,7 @@ def create_ui(): outputs=[available_extensions_table, install_result] ) - with gr.TabItem("Install from URL"): + with gr.TabItem("Install from URL", id="install_from_url"): install_url = gr.Text(label="URL for extension's git repository") install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") install_button = gr.Button(value="Install", variant="primary") diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py 
index 25eb464b..ad98f083 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -241,9 +241,9 @@ def create_ui(container, button, tabname): with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs: for page in ui.stored_extra_pages: - with gr.Tab(page.title): + with gr.Tab(page.title, id=page.title.lower().replace(" ", "_")): - page_elem = gr.HTML(page.create_html(ui.tabname)) + page_elem = gr.HTML("") ui.pages.append(page_elem) filter = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False) @@ -284,7 +284,7 @@ def setup_ui(ui, gallery): def save_preview(index, images, filename): if len(images) == 0: print("There is no image in gallery to save as a preview.") - return [page.create_html(ui.tabname) for page in ui.stored_extra_pages] + return ["" for page in ui.stored_extra_pages] index = int(index) index = 0 if index < 0 else index @@ -309,7 +309,7 @@ def setup_ui(ui, gallery): else: image.save(filename) - return [page.create_html(ui.tabname) for page in ui.stored_extra_pages] + return ["" for page in ui.stored_extra_pages] ui.button_save_preview.click( fn=save_preview, diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index b418d955..81decfc4 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -9,13 +9,13 @@ def create_ui(): with gr.Row().style(equal_height=False, variant='compact'): with gr.Column(variant='compact'): with gr.Tabs(elem_id="mode_extras"): - with gr.TabItem('Single Image', elem_id="extras_single_tab") as tab_single: + with gr.TabItem('Single Image', id="single_image", elem_id="extras_single_tab") as tab_single: extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") - with gr.TabItem('Batch Process', elem_id="extras_batch_process_tab") as tab_batch: + with gr.TabItem('Batch Process', id="batch_process", elem_id="extras_batch_process_tab") as tab_batch: image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file", elem_id="extras_image_batch") - with gr.TabItem('Batch from Directory', elem_id="extras_batch_directory_tab") as tab_batch_dir: + with gr.TabItem('Batch from Directory', id="batch_from_directory", elem_id="extras_batch_directory_tab") as tab_batch_dir: extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir") extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir") show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results") -- cgit v1.2.1 From ad5afcaae0b47e9e68b49aacf04cc3ad59d41a8e Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 16:46:03 -0500 Subject: Save/restore working webui/extension configs --- modules/extensions.py | 25 +++++-- modules/paths_internal.py | 1 + modules/shared.py | 1 + modules/ui_extensions.py | 186 +++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 204 insertions(+), 9 deletions(-) (limited to 'modules') diff --git a/modules/extensions.py b/modules/extensions.py index 3a7a0372..a87beaa3 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -3,10 +3,11 @@ import sys import traceback import time +from datetime import datetime import git from 
modules import shared -from modules.paths_internal import extensions_dir, extensions_builtin_dir +from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path extensions = [] @@ -31,12 +32,15 @@ class Extension: self.status = '' self.can_update = False self.is_builtin = is_builtin + self.commit_hash = '' + self.commit_date = None self.version = '' + self.branch = None self.remote = None self.have_info_from_repo = False def read_info_from_repo(self): - if self.have_info_from_repo: + if self.is_builtin or self.have_info_from_repo: return self.have_info_from_repo = True @@ -56,10 +60,15 @@ class Extension: self.status = 'unknown' self.remote = next(repo.remote().urls, None) head = repo.head.commit - ts = time.asctime(time.gmtime(repo.head.commit.committed_date)) - self.version = f'{head.hexsha[:8]} ({ts})' - - except Exception: + self.commit_date = repo.head.commit.committed_date + ts = time.asctime(time.gmtime(self.commit_date)) + if repo.active_branch: + self.branch = repo.active_branch.name + self.commit_hash = head.hexsha + self.version = f'{self.commit_hash[:8]} ({ts})' + + except Exception as ex: + print(f"Failed reading extension data from Git repository ({self.name}): {ex}", file=sys.stderr) self.remote = None def list_files(self, subdir, extension): @@ -88,12 +97,12 @@ class Extension: self.can_update = False self.status = "latest" - def fetch_and_reset_hard(self): + def fetch_and_reset_hard(self, commit='origin'): repo = git.Repo(self.path) # Fix: `error: Your local changes to the following files would be overwritten by merge`, # because WSL2 Docker set 755 file permissions instead of 644, this results to the error. repo.git.fetch(all=True) - repo.git.reset('origin', hard=True) + repo.git.reset(commit, hard=True) def list_extensions(): diff --git a/modules/paths_internal.py b/modules/paths_internal.py index 926ec3bb..6765bafe 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -20,3 +20,4 @@ data_path = cmd_opts_pre.data_dir models_path = os.path.join(data_path, "models") extensions_dir = os.path.join(data_path, "extensions") extensions_builtin_dir = os.path.join(script_path, "extensions-builtin") +config_states_dir = os.path.join(script_path, "config_states") diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..ffc5e4fe 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -424,6 +424,7 @@ options_templates.update(options_section(('postprocessing', "Postprocessing"), { options_templates.update(options_section((None, "Hidden options"), { "disabled_extensions": OptionInfo([], "Disable these extensions"), "disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}), + "restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"), "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), })) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index efd6cda2..ed677b3e 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -2,6 +2,7 @@ import json import os.path import sys import time +from datetime import datetime import traceback import git @@ -11,7 +12,8 @@ import html import shutil import errno -from modules import extensions, shared, paths +from modules import extensions, shared, paths, config_states +from modules.paths_internal import config_states_dir from modules.call_queue import wrap_gradio_gpu_call 
available_extensions = {"extensions": []} @@ -30,6 +32,9 @@ def apply_and_restart(disable_list, update_list, disable_all): update = json.loads(update_list) assert type(update) == list, f"wrong update_list data for apply_and_restart: {update_list}" + if update: + save_config_state("Backup (pre-update)") + update = set(update) for ext in extensions.extensions: @@ -50,6 +55,48 @@ def apply_and_restart(disable_list, update_list, disable_all): shared.state.need_restart = True +def save_config_state(name): + current_config_state = config_states.get_config() + if not name: + name = "Config" + current_config_state["name"] = name + filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + ".json") + print(f"Saving backup of webui/extension state to {filename}.") + with open(filename, "w", encoding="utf-8") as f: + json.dump(current_config_state, f) + config_states.list_config_states() + new_value = next(iter(config_states.all_config_states.keys()), "Current") + new_choices = ["Current"] + list(config_states.all_config_states.keys()) + return gr.Dropdown.update(value=new_value, choices=new_choices), f"Saved current webui/extension state to '{filename}'" + + +def restore_config_state(confirmed, config_state_name, restore_type): + if config_state_name == "Current": + return "Select a config to restore from." + if not confirmed: + return "Cancelled." + + check_access() + + save_config_state("Backup (pre-restore)") + + config_state = config_states.all_config_states[config_state_name] + + print(f"Restoring webui state from backup: {restore_type}") + + if restore_type == "extensions" or restore_type == "both": + shared.opts.restore_config_state_file = config_state["filename"] + shared.opts.save(shared.config_filename) + + if restore_type == "webui" or restore_type == "both": + config_states.restore_webui_config(config_state) + + shared.state.interrupt() + shared.state.need_restart = True + + return "" + + def check_updates(id_task, disable_list): check_access() @@ -121,6 +168,117 @@ def extension_table(): return code +def update_config_states_table(state_name): + if state_name == "Current": + config_state = config_states.get_config() + else: + config_state = config_states.all_config_states[state_name] + + config_name = config_state.get("name", "Config") + created_date = time.asctime(time.gmtime(config_state["created_at"])) + + code = f"""""" + + webui_remote = config_state["webui"]["remote"] or "" + webui_branch = config_state["webui"]["branch"] + webui_commit_hash = config_state["webui"]["commit_hash"] + if webui_commit_hash: + webui_commit_hash = webui_commit_hash[:8] + else: + webui_commit_hash = "" + webui_commit_date = config_state["webui"]["commit_date"] + if webui_commit_date: + webui_commit_date = time.asctime(time.gmtime(webui_commit_date)) + else: + webui_commit_date = "" + + code += f"""

Config Backup: {config_name}

+ Created at: {created_date}""" + + code += f"""

WebUI State

+ + + + + + + + + + + + + + + + + +
URLBranchCommitDate
{webui_remote}{webui_branch}{webui_commit_hash}{webui_commit_date}
+ """ + + code += """

Extension State

+ + + + + + + + + + + + """ + + ext_map = {ext.name: ext for ext in extensions.extensions} + + for ext_name, ext_conf in config_state["extensions"].items(): + ext_remote = ext_conf["remote"] or "" + ext_branch = ext_conf["branch"] or "" + ext_enabled = ext_conf["enabled"] + ext_commit_hash = ext_conf["commit_hash"] or "" + ext_commit_date = ext_conf["commit_date"] + if ext_commit_date: + ext_commit_date = time.asctime(time.gmtime(ext_commit_date)) + else: + ext_commit_date = "" + + remote = f"""{html.escape(ext_remote or '')}""" + + style_enabled = "" + style_remote = "" + style_branch = "" + style_commit = "" + if ext_name in ext_map: + current_ext = ext_map[ext_name] + current_ext.read_info_from_repo() + if current_ext.enabled != ext_enabled: + style_enabled = ' style="color: var(--primary-400)"' + if current_ext.remote != ext_remote: + style_remote = ' style="color: var(--primary-400)"' + if current_ext.branch != ext_branch: + style_branch = ' style="color: var(--primary-400)"' + if current_ext.commit_hash != ext_commit_hash: + style_commit = ' style="color: var(--primary-400)"' + + code += f""" + + + + + + + + """ + + code += """ + +
ExtensionURLBranchCommitDate
{html.escape(ext_name)}{remote}{ext_branch}{ext_commit_hash[:8]}{ext_commit_date}
+ """ + + return code + + def normalize_git_url(url): if url is None: return "" @@ -292,6 +450,8 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=" def create_ui(): import modules.ui + config_states.list_config_states() + with gr.Blocks(analytics_enabled=False) as ui: with gr.Tabs(elem_id="tabs_extensions") as tabs: with gr.TabItem("Installed"): @@ -386,4 +546,28 @@ def create_ui(): outputs=[extensions_table, install_result], ) + with gr.TabItem("Backup/Restore"): + with gr.Row(elem_id="extensions_backup_top_row"): + config_states_list = gr.Dropdown(label="Saved Configs", elem_id="extension_backup_saved_configs", value="Current", choices=["Current"] + list(config_states.all_config_states.keys())) + modules.ui.create_refresh_button(config_states_list, config_states.list_config_states, lambda: {"choices": ["Current"] + list(config_states.all_config_states.keys())}, "refresh_config_states") + config_restore_type = gr.Radio(label="State to restore", choices=["extensions", "webui", "both"], value="extensions", elem_id="extension_backup_restore_type") + config_restore_button = gr.Button(value="Restore Selected Config", variant="primary", elem_id="extension_backup_restore") + with gr.Row(elem_id="extensions_backup_top_row2"): + config_save_name = gr.Textbox("", placeholder="Config Name", show_label=False) + config_save_button = gr.Button(value="Save Current Config") + + config_states_info = gr.HTML("") + config_states_table = gr.HTML(lambda: update_config_states_table("Current")) + + config_save_button.click(fn=save_config_state, inputs=[config_save_name], outputs=[config_states_list, config_states_info]) + + dummy_component = gr.Label(visible=False) + config_restore_button.click(fn=restore_config_state, _js="config_state_confirm_restore", inputs=[dummy_component, config_states_list, config_restore_type], outputs=[config_states_info]) + + config_states_list.change( + fn=update_config_states_table, + inputs=[config_states_list], + outputs=[config_states_table], + ) + return ui -- cgit v1.2.1 From f22d0dde4e57444b2d4fe997338550bb82bb249e Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 18:32:29 -0500 Subject: Better checking of extension state from Git info --- modules/extensions.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/extensions.py b/modules/extensions.py index a87beaa3..34d9d654 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -91,9 +91,20 @@ class Extension: for fetch in repo.remote().fetch(dry_run=True): if fetch.flags != fetch.HEAD_UPTODATE: self.can_update = True - self.status = "behind" + self.status = "new commits" return + try: + origin = repo.rev_parse('origin') + if repo.head.commit != origin: + self.can_update = True + self.status = "behind HEAD" + return + except Exception: + self.can_update = False + self.status = "unknown (remote error)" + return + self.can_update = False self.status = "latest" @@ -103,6 +114,7 @@ class Extension: # because WSL2 Docker set 755 file permissions instead of 644, this results to the error. 
repo.git.fetch(all=True) repo.git.reset(commit, hard=True) + self.have_info_from_repo = False def list_extensions(): -- cgit v1.2.1 From f3320b802c12f29e5a3201fcc0abfe72be294293 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 18:32:54 -0500 Subject: Various UI fixes in config state tab --- modules/config_states.py | 200 +++++++++++++++++++++++++++++++++++++++++++++++ modules/ui_extensions.py | 45 ++++++----- 2 files changed, 226 insertions(+), 19 deletions(-) create mode 100644 modules/config_states.py (limited to 'modules') diff --git a/modules/config_states.py b/modules/config_states.py new file mode 100644 index 00000000..2ea00929 --- /dev/null +++ b/modules/config_states.py @@ -0,0 +1,200 @@ +""" +Supports saving and restoring webui and extensions from a known working set of commits +""" + +import os +import sys +import traceback +import json +import time +import tqdm + +from datetime import datetime +from collections import OrderedDict +import git + +from modules import shared, extensions +from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir + + +all_config_states = OrderedDict() + + +def list_config_states(): + global all_config_states + + all_config_states.clear() + os.makedirs(config_states_dir, exist_ok=True) + + config_states = [] + for filename in os.listdir(config_states_dir): + if filename.endswith(".json"): + path = os.path.join(config_states_dir, filename) + with open(path, "r", encoding="utf-8") as f: + j = json.load(f) + j["filepath"] = path + config_states.append(j) + + config_states = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)) + + for cs in config_states: + timestamp = time.asctime(time.gmtime(cs["created_at"])) + name = cs.get("name", "Config") + full_name = f"{name}: {timestamp}" + all_config_states[full_name] = cs + + return all_config_states + + +def get_webui_config(): + webui_repo = None + + try: + if os.path.exists(os.path.join(script_path, ".git")): + webui_repo = git.Repo(script_path) + except Exception: + print(f"Error reading webui git info from {script_path}:", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + + webui_remote = None + webui_commit_hash = None + webui_commit_date = None + webui_branch = None + if webui_repo and not webui_repo.bare: + try: + webui_remote = next(webui_repo.remote().urls, None) + head = webui_repo.head.commit + webui_commit_date = webui_repo.head.commit.committed_date + webui_commit_hash = head.hexsha + webui_branch = webui_repo.active_branch.name + + except Exception: + webui_remote = None + + return { + "remote": webui_remote, + "commit_hash": webui_commit_hash, + "commit_date": webui_commit_date, + "branch": webui_branch, + } + + +def get_extension_config(): + ext_config = {} + + for ext in extensions.extensions: + entry = { + "name": ext.name, + "path": ext.path, + "enabled": ext.enabled, + "is_builtin": ext.is_builtin, + "remote": ext.remote, + "commit_hash": ext.commit_hash, + "commit_date": ext.commit_date, + "branch": ext.branch, + "have_info_from_repo": ext.have_info_from_repo + } + + ext_config[ext.name] = entry + + return ext_config + + +def get_config(): + creation_time = datetime.now().timestamp() + webui_config = get_webui_config() + ext_config = get_extension_config() + + return { + "created_at": creation_time, + "webui": webui_config, + "extensions": ext_config + } + + +def restore_webui_config(config): + print("* Restoring webui state...") + + if 
"webui" not in config: + print("Error: No webui data saved to config") + return + + webui_config = config["webui"] + + if "commit_hash" not in webui_config: + print("Error: No commit saved to webui config") + return + + webui_commit_hash = webui_config.get("commit_hash", None) + webui_repo = None + + try: + if os.path.exists(os.path.join(script_path, ".git")): + webui_repo = git.Repo(script_path) + except Exception: + print(f"Error reading webui git info from {script_path}:", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + return + + try: + webui_repo.git.fetch(all=True) + webui_repo.git.reset(webui_commit_hash, hard=True) + print(f"* Restored webui to commit {webui_commit_hash}.") + except Exception: + print(f"Error restoring webui to commit {webui_commit_hash}:", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + + +def restore_extension_config(config): + print("* Restoring extension state...") + + if "extensions" not in config: + print("Error: No extension data saved to config") + return + + ext_config = config["extensions"] + + results = [] + disabled = [] + + for ext in tqdm.tqdm(extensions.extensions): + if ext.is_builtin: + continue + + ext.read_info_from_repo() + current_commit = ext.commit_hash + + if ext.name not in ext_config: + ext.disabled = True + disabled.append(ext.name) + results.append((ext, current_commit[:8], False, "Saved extension state not found in config, marking as disabled")) + continue + + entry = ext_config[ext.name] + + if "commit_hash" in entry and entry["commit_hash"]: + try: + ext.fetch_and_reset_hard(entry["commit_hash"]) + ext.read_info_from_repo() + if current_commit != entry["commit_hash"]: + results.append((ext, current_commit[:8], True, entry["commit_hash"][:8])) + except Exception as ex: + results.append((ext, current_commit[:8], False, ex)) + else: + results.append((ext, current_commit[:8], False, "No commit hash found in config")) + + if not entry.get("enabled", False): + ext.disabled = True + disabled.append(ext.name) + else: + ext.disabled = False + + shared.opts.disabled_extensions = disabled + shared.opts.save(shared.config_filename) + + print("* Finished restoring extensions. Results:") + for ext, prev_commit, success, result in results: + if success: + print(f" + {ext.name}: {prev_commit} -> {result}") + else: + print(f" ! 
{ext.name}: FAILURE ({result})") diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index ed677b3e..b94b3a3a 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -17,6 +17,7 @@ from modules.paths_internal import config_states_dir from modules.call_queue import wrap_gradio_gpu_call available_extensions = {"extensions": []} +STYLE_PRIMARY = ' style="color: var(--primary-400)"' def check_access(): @@ -67,7 +68,7 @@ def save_config_state(name): config_states.list_config_states() new_value = next(iter(config_states.all_config_states.keys()), "Current") new_choices = ["Current"] + list(config_states.all_config_states.keys()) - return gr.Dropdown.update(value=new_value, choices=new_choices), f"Saved current webui/extension state to '{filename}'" + return gr.Dropdown.update(value=new_value, choices=new_choices), f"Saved current webui/extension state to \"{filename}\"" def restore_config_state(confirmed, config_state_name, restore_type): @@ -78,14 +79,12 @@ def restore_config_state(confirmed, config_state_name, restore_type): check_access() - save_config_state("Backup (pre-restore)") - config_state = config_states.all_config_states[config_state_name] - print(f"Restoring webui state from backup: {restore_type}") + print(f"*** Restoring webui state from backup: {restore_type} ***") if restore_type == "extensions" or restore_type == "both": - shared.opts.restore_config_state_file = config_state["filename"] + shared.opts.restore_config_state_file = config_state["filepath"] shared.opts.save(shared.config_filename) if restore_type == "webui" or restore_type == "both": @@ -149,7 +148,7 @@ def extension_table(): style = "" if shared.opts.disable_all_extensions == "extra" and not ext.is_builtin or shared.opts.disable_all_extensions == "all": - style = ' style="color: var(--primary-400)"' + style = STYLE_PRIMARY code += f""" @@ -181,17 +180,25 @@ def update_config_states_table(state_name): webui_remote = config_state["webui"]["remote"] or "" webui_branch = config_state["webui"]["branch"] - webui_commit_hash = config_state["webui"]["commit_hash"] - if webui_commit_hash: - webui_commit_hash = webui_commit_hash[:8] - else: - webui_commit_hash = "" + webui_commit_hash = config_state["webui"]["commit_hash"] or "" webui_commit_date = config_state["webui"]["commit_date"] if webui_commit_date: webui_commit_date = time.asctime(time.gmtime(webui_commit_date)) else: webui_commit_date = "" + current_webui = config_states.get_webui_config() + + style_remote = "" + style_branch = "" + style_commit = "" + if current_webui["remote"] != webui_remote: + style_remote = STYLE_PRIMARY + if current_webui["branch"] != webui_branch: + style_branch = STYLE_PRIMARY + if current_webui["commit_hash"] != webui_commit_hash: + style_commit = STYLE_PRIMARY + code += f"""

Config Backup: {config_name}

Created at: {created_date}""" @@ -207,10 +214,10 @@ def update_config_states_table(state_name): - {webui_remote} - {webui_branch} - {webui_commit_hash} - {webui_commit_date} + {webui_remote} + {webui_branch} + {webui_commit_hash[:8]} + {webui_commit_date} @@ -253,13 +260,13 @@ def update_config_states_table(state_name): current_ext = ext_map[ext_name] current_ext.read_info_from_repo() if current_ext.enabled != ext_enabled: - style_enabled = ' style="color: var(--primary-400)"' + style_enabled = STYLE_PRIMARY if current_ext.remote != ext_remote: - style_remote = ' style="color: var(--primary-400)"' + style_remote = STYLE_PRIMARY if current_ext.branch != ext_branch: - style_branch = ' style="color: var(--primary-400)"' + style_branch = STYLE_PRIMARY if current_ext.commit_hash != ext_commit_hash: - style_commit = ' style="color: var(--primary-400)"' + style_commit = STYLE_PRIMARY code += f""" -- cgit v1.2.1 From 9b1fa8298127b05c71c4de04bd6f64b72540ef5a Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 18:55:57 -0500 Subject: Add filename to UI and config name to filename --- modules/ui_extensions.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index b94b3a3a..b1ef38e2 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -61,7 +61,7 @@ def save_config_state(name): if not name: name = "Config" current_config_state["name"] = name - filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + ".json") + filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + "_" + name + ".json") print(f"Saving backup of webui/extension state to {filename}.") with open(filename, "w", encoding="utf-8") as f: json.dump(current_config_state, f) @@ -175,6 +175,7 @@ def update_config_states_table(state_name): config_name = config_state.get("name", "Config") created_date = time.asctime(time.gmtime(config_state["created_at"])) + filepath = config_state.get("filepath", "") code = f"""""" @@ -200,6 +201,7 @@ def update_config_states_table(state_name): style_commit = STYLE_PRIMARY code += f"""

Config Backup: {config_name}

+ Filepath: {filepath} Created at: {created_date}""" code += f"""

WebUI State

-- cgit v1.2.1 From 64bbd3bf030aac9068df6bbe79c852e97cf6dbde Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:00:51 -0500 Subject: Make into divs --- modules/ui_extensions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index b1ef38e2..3735965c 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -201,8 +201,8 @@ def update_config_states_table(state_name): style_commit = STYLE_PRIMARY code += f"""

Config Backup: {config_name}

- Filepath: {filepath} - Created at: {created_date}""" +
Filepath: {filepath}
+
Created at: {created_date}
""" code += f"""

WebUI State

-- cgit v1.2.1 From 1c0544abdbffb910837d285857515b932a073f89 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:21:57 -0500 Subject: Add links for commits in table, if remote is from GitHub --- modules/ui_extensions.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 3735965c..0843a7c0 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -122,6 +122,16 @@ def check_updates(id_task, disable_list): return extension_table(), "" +def make_commit_link(commit_hash, remote, text=None): + if text is None: + text = commit_hash[:8] + if remote.startswith("https://github.com/"): + href = os.path.join(remote, "commit", commit_hash) + return f'{text}' + else: + return text + + def extension_table(): code = f"""
@@ -150,11 +160,15 @@ def extension_table(): if shared.opts.disable_all_extensions == "extra" and not ext.is_builtin or shared.opts.disable_all_extensions == "all": style = STYLE_PRIMARY + version_link = ext.version + if ext.commit_hash and ext.remote: + version_link = make_commit_link(ext.commit_hash, ext.remote, ext.version) + code += f""" - + {ext_status} """ @@ -200,6 +214,9 @@ def update_config_states_table(state_name): if current_webui["commit_hash"] != webui_commit_hash: style_commit = STYLE_PRIMARY + commit_link = make_commit_link(webui_commit_hash, webui_remote) + date_link = make_commit_link(webui_commit_hash, webui_remote, webui_commit_date) + code += f"""

Config Backup: {config_name}

Filepath: {filepath}
Created at: {created_date}
""" @@ -218,8 +235,8 @@ def update_config_states_table(state_name): - - + +
{html.escape(ext.name)} {remote}{ext.version}{version_link}
{webui_remote} {webui_branch}{webui_commit_hash[:8]}{webui_commit_date}{commit_link}{date_link}
@@ -270,13 +287,16 @@ def update_config_states_table(state_name): if current_ext.commit_hash != ext_commit_hash: style_commit = STYLE_PRIMARY + commit_link = make_commit_link(ext_commit_hash, ext_remote) + date_link = make_commit_link(ext_commit_hash, ext_remote, ext_commit_date) + code += f""" {html.escape(ext_name)} {remote} {ext_branch} - {ext_commit_hash[:8]} - {ext_commit_date} + {commit_link} + {date_link} """ -- cgit v1.2.1 From 3ccf6f5ae8e57233200f9cac52da8d5d88ecee93 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 29 Mar 2023 19:26:52 -0500 Subject: Add webui link --- modules/ui_extensions.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'modules') diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 0843a7c0..af9e92cc 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -202,6 +202,10 @@ def update_config_states_table(state_name): else: webui_commit_date = "" + remote = f"""{html.escape(webui_remote or '')}""" + commit_link = make_commit_link(webui_commit_hash, webui_remote) + date_link = make_commit_link(webui_commit_hash, webui_remote, webui_commit_date) + current_webui = config_states.get_webui_config() style_remote = "" @@ -214,9 +218,6 @@ def update_config_states_table(state_name): if current_webui["commit_hash"] != webui_commit_hash: style_commit = STYLE_PRIMARY - commit_link = make_commit_link(webui_commit_hash, webui_remote) - date_link = make_commit_link(webui_commit_hash, webui_remote, webui_commit_date) - code += f"""

Config Backup: {config_name}

Filepath: {filepath}
Created at: {created_date}
""" @@ -233,7 +234,7 @@ def update_config_states_table(state_name): - {webui_remote} + {remote} {webui_branch} {commit_link} {date_link} @@ -270,6 +271,8 @@ def update_config_states_table(state_name): ext_commit_date = "" remote = f"""{html.escape(ext_remote or '')}""" + commit_link = make_commit_link(ext_commit_hash, ext_remote) + date_link = make_commit_link(ext_commit_hash, ext_remote, ext_commit_date) style_enabled = "" style_remote = "" @@ -287,9 +290,6 @@ def update_config_states_table(state_name): if current_ext.commit_hash != ext_commit_hash: style_commit = STYLE_PRIMARY - commit_link = make_commit_link(ext_commit_hash, ext_remote) - date_link = make_commit_link(ext_commit_hash, ext_remote, ext_commit_date) - code += f""" {html.escape(ext_name)} -- cgit v1.2.1 From 44e8e9c36807d4a71c2fc84129ebcf5ba4f77f21 Mon Sep 17 00:00:00 2001 From: devdn Date: Thu, 30 Mar 2023 00:54:28 -0400 Subject: fix live preview & alternate uncond guidance for better quality --- modules/sd_samplers_kdiffusion.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 6a54ce32..17d24df4 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -116,11 +116,13 @@ class CFGDenoiser(torch.nn.Module): tensor = denoiser_params.text_cond uncond = denoiser_params.text_uncond - sigma_thresh = s_min_uncond - if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_thresh*sigma_thresh) and not is_edit_model): - uncond = torch.zeros([0,0,uncond.shape[2]]) - x_in=x_in[:x_in.shape[0]//2] - sigma_in=sigma_in[:sigma_in.shape[0]//2] + if self.step % 2 and s_min_uncond > 0 and not is_edit_model: + # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it + sigma_threshold = s_min_uncond + if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_threshold*sigma_threshold) ): + uncond = torch.zeros([0,0,uncond.shape[2]]) + x_in=x_in[:x_in.shape[0]//2] + sigma_in=sigma_in[:sigma_in.shape[0]//2] if tensor.shape[1] == uncond.shape[1]: if not is_edit_model: @@ -159,7 +161,7 @@ class CFGDenoiser(torch.nn.Module): devices.test_for_nans(x_out, "unet") if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(x_out[0:uncond.shape[0]]) + sd_samplers_common.store_latent(x_out[0:x_out.shape[0]-uncond.shape[0]]) elif opts.live_preview_content == "Negative prompt": sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) -- cgit v1.2.1 From dbca512154341bb13e1b15d207176f2d403aff30 Mon Sep 17 00:00:00 2001 From: siutin Date: Fri, 3 Feb 2023 03:13:03 +0800 Subject: add an internal API for obtaining current task id --- modules/progress.py | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'modules') diff --git a/modules/progress.py b/modules/progress.py index c69ecf3d..05032ac5 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -4,6 +4,7 @@ import time import gradio as gr from pydantic import BaseModel, Field +from typing import List from modules.shared import opts @@ -37,6 +38,9 @@ def add_task_to_queue(id_job): pending_tasks[id_job] = time.time() +class CurrentTaskResponse(BaseModel): + current_task: str = Field(default=None, title="Task ID", description="id of the current progress task") + class ProgressRequest(BaseModel): id_task: str = Field(default=None, title="Task ID", description="id of the task to get progress for") id_live_preview: int = Field(default=-1, title="Live preview image ID", description="id 
of last received last preview image") @@ -56,6 +60,8 @@ class ProgressResponse(BaseModel): def setup_progress_api(app): return app.add_api_route("/internal/progress", progressapi, methods=["POST"], response_model=ProgressResponse) +def setup_current_task_api(app): + return app.add_api_route("/internal/current_task", current_task_api, methods=["GET"], response_model=CurrentTaskResponse) def progressapi(req: ProgressRequest): active = req.id_task == current_task @@ -97,3 +103,5 @@ def progressapi(req: ProgressRequest): return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo) +def current_task_api(): + return CurrentTaskResponse(current_task=current_task) \ No newline at end of file -- cgit v1.2.1 From 9407f1731aa8c112ffc0efaa611a76f7fead3d0c Mon Sep 17 00:00:00 2001 From: siutin Date: Mon, 6 Feb 2023 03:53:05 +0800 Subject: store the last generated result --- modules/call_queue.py | 1 + modules/progress.py | 10 ++++++++++ 2 files changed, 11 insertions(+) (limited to 'modules') diff --git a/modules/call_queue.py b/modules/call_queue.py index 92097c15..30ac26bc 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -37,6 +37,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None): res = func(*args, **kwargs) finally: progress.finish_task(id_task) + progress.set_last_task_result(id_task, res) shared.state.end() diff --git a/modules/progress.py b/modules/progress.py index 05032ac5..27a336ad 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -37,6 +37,16 @@ def finish_task(id_task): def add_task_to_queue(id_job): pending_tasks[id_job] = time.time() +last_task_id = None +last_task_result = None + +def set_last_task_result(id_job, result): + global last_task_id + global last_task_result + + last_task_id = id_job + last_task_result = result + class CurrentTaskResponse(BaseModel): current_task: str = Field(default=None, title="Task ID", description="id of the current progress task") -- cgit v1.2.1 From 4242e194e417ec5008d09ec6d756594ac65f77bd Mon Sep 17 00:00:00 2001 From: siutin Date: Mon, 6 Feb 2023 03:55:31 +0800 Subject: add a button to restore the current progress --- modules/progress.py | 14 ++++++++++++++ modules/ui.py | 34 ++++++++++++++++++++++++++++++---- 2 files changed, 44 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/progress.py b/modules/progress.py index 27a336ad..36963c92 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -48,6 +48,20 @@ def set_last_task_result(id_job, result): last_task_result = result +def restore_progress_call(task_tag): + if current_task is None or not current_task[5:-1].startswith(task_tag): + + # image, generation_info, html_info, html_log + return tuple(list([None, None, None, None])) + + else: + + t_task = current_task + while t_task != last_task_id: + time.sleep(2.5) + return last_task_result + + class CurrentTaskResponse(BaseModel): current_task: str = Field(default=None, title="Task ID", description="id of the current progress task") diff --git a/modules/ui.py b/modules/ui.py index 627fbe0b..0133ee12 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -41,6 +41,7 @@ from modules.textual_inversion import textual_inversion import modules.hypernetworks.ui from modules.generation_parameters_copypaste import image_from_url_text import modules.extras +from modules.progress import restore_progress_call warnings.filterwarnings("default" if opts.show_warnings else 
"ignore", category=UserWarning) @@ -293,6 +294,7 @@ def create_toprow(is_img2img): interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') + restore_progress = gr.Button('Restore Progress', elem_id=f"{id_part}_restore_progress") skip.click( fn=lambda: shared.state.skip(), @@ -329,7 +331,7 @@ def create_toprow(is_img2img): prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button + return prompt, prompt_styles, negative_prompt, submit, restore_progress, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button def setup_progressbar(*args, **kwargs): @@ -446,7 +448,7 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=False) + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, restore_progress, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) @@ -578,6 +580,18 @@ def create_ui(): res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) + restore_progress.click( + fn=lambda: restore_progress_call('txt2img'), + _js="() => restoreProgress('txt2img')", + inputs=[], + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ] + ) + txt_prompt_img.change( fn=modules.images.image_data, inputs=[ @@ -646,7 +660,7 @@ def create_ui(): modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=True) + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, restore_progress, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", 
elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) @@ -898,6 +912,18 @@ def create_ui(): submit.click(**img2img_args) res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) + restore_progress.click( + fn=lambda: restore_progress_call('img2img'), + _js="() => restoreProgress('img2img')", + inputs=[], + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ] + ) + img2img_interrogate.click( fn=lambda *args: process_interrogate(interrogate, *args), **interrogate_args, @@ -1491,7 +1517,7 @@ def create_ui(): gr.HTML(shared.html("licenses.html"), elem_id="licenses") gr.Button(value="Show all pages", elem_id="settings_show_all_pages") - + def unload_sd_weights(): modules.sd_models.unload_model_weights() -- cgit v1.2.1 From e0b58527ff040f9c547ea45b5fcf1bfb7ab23cdd Mon Sep 17 00:00:00 2001 From: siutin Date: Mon, 6 Feb 2023 15:57:26 +0800 Subject: use condition to wait for result --- modules/call_queue.py | 2 +- modules/progress.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/call_queue.py b/modules/call_queue.py index 30ac26bc..9888109e 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -7,7 +7,7 @@ import time from modules import shared, progress queue_lock = threading.Lock() - +queue_lock_condition = threading.Condition(lock=queue_lock) def wrap_queued_call(func): def f(*args, **kwargs): diff --git a/modules/progress.py b/modules/progress.py index 36963c92..1947c0fd 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -6,6 +6,7 @@ import gradio as gr from pydantic import BaseModel, Field from typing import List +from modules import call_queue from modules.shared import opts import modules.shared as shared @@ -57,8 +58,9 @@ def restore_progress_call(task_tag): else: t_task = current_task - while t_task != last_task_id: - time.sleep(2.5) + with call_queue.queue_lock_condition: + call_queue.queue_lock_condition.wait_for(lambda: t_task == last_task_id) + return last_task_result -- cgit v1.2.1 From 90366b8d8564c6fcbf5899fb31e426b68b04eb7b Mon Sep 17 00:00:00 2001 From: siutin Date: Wed, 29 Mar 2023 00:13:15 +0800 Subject: tool button --- modules/ui.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index 0133ee12..8fc17ce7 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -82,6 +82,7 @@ apply_style_symbol = '\U0001f4cb' # 📋 clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ extra_networks_symbol = '\U0001F3B4' # 🎴 switch_values_symbol = '\U000021C5' # ⇅ +restore_progress_symbol = '\U0001F300' # 🌀 def plaintext_to_html(text): @@ -294,7 +295,6 @@ def create_toprow(is_img2img): interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt") skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip") submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') - restore_progress = gr.Button('Restore Progress', elem_id=f"{id_part}_restore_progress") skip.click( fn=lambda: shared.state.skip(), @@ -314,6 +314,7 @@ def create_toprow(is_img2img): extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") + restore_progress_button = 
ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress") token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") @@ -331,7 +332,7 @@ def create_toprow(is_img2img): prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - return prompt, prompt_styles, negative_prompt, submit, restore_progress, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button + return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button def setup_progressbar(*args, **kwargs): @@ -448,7 +449,7 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, restore_progress, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=False) + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) @@ -580,8 +581,8 @@ def create_ui(): res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) - restore_progress.click( - fn=lambda: restore_progress_call('txt2img'), + restore_progress_button.click( + fn=lambda: restore_progress_call(), _js="() => restoreProgress('txt2img')", inputs=[], outputs=[ @@ -660,7 +661,7 @@ def create_ui(): modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, restore_progress, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=True) + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) @@ -912,8 +913,8 @@ def create_ui(): submit.click(**img2img_args) res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], 
outputs=[width, height], show_progress=False) - restore_progress.click( - fn=lambda: restore_progress_call('img2img'), + restore_progress_button.click( + fn=lambda: restore_progress_call(), _js="() => restoreProgress('img2img')", inputs=[], outputs=[ -- cgit v1.2.1 From 70ab21e67d128b953fbf4a360e02ac783f40dd55 Mon Sep 17 00:00:00 2001 From: siutin Date: Wed, 29 Mar 2023 00:17:19 +0800 Subject: keep randomId simpler --- modules/progress.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/progress.py b/modules/progress.py index 1947c0fd..e99267f5 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -49,8 +49,8 @@ def set_last_task_result(id_job, result): last_task_result = result -def restore_progress_call(task_tag): - if current_task is None or not current_task[5:-1].startswith(task_tag): +def restore_progress_call(): + if current_task is None: # image, generation_info, html_info, html_log return tuple(list([None, None, None, None])) -- cgit v1.2.1 From 18e4ca46944201ccd0b506201d5da441eba93080 Mon Sep 17 00:00:00 2001 From: Z_nonymous <0x29a@free.fr> Date: Fri, 31 Mar 2023 10:54:42 +0200 Subject: Fix #9185 --- modules/img2img.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/img2img.py b/modules/img2img.py index 953ac5d2..5ce408f4 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -4,7 +4,7 @@ import sys import traceback import numpy as np -from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops +from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError from modules import devices, sd_samplers from modules.generation_parameters_copypaste import create_override_settings_dict @@ -46,7 +46,10 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): if state.interrupted: break - img = Image.open(image) + try: + img = Image.open(image) + except UnidentifiedImageError: + continue # Use the EXIF orientation of photos taken by smartphones. img = ImageOps.exif_transpose(img) p.init_images = [img] * p.batch_size -- cgit v1.2.1 From 56680cd84ab68a283772cf697f8a72408a3f4167 Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Sat, 1 Apr 2023 02:07:08 -0500 Subject: first --- modules/cmd_args.py | 4 ++++ modules/sd_models.py | 8 ++++++++ 2 files changed, 12 insertions(+) (limited to 'modules') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 81c0b82a..4314f97b 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -101,3 +101,7 @@ parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gra parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) + +# token merging / tomesd +parser.add_argument("--token-merging", action='store_true', help="Provides generation speedup by merging redundant tokens. (compatible with --xformers)", default=False) +parser.add_argument("--token-merging-ratio", type=float, help="Adjusts ratio of merged to untouched tokens. 
Range: (0.0-1.0], Defaults to 0.5", default=0.5) diff --git a/modules/sd_models.py b/modules/sd_models.py index 6ea874df..0b74aa0f 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -9,6 +9,7 @@ from omegaconf import OmegaConf from os import mkdir from urllib import request import ldm.modules.midas as midas +import tomesd from ldm.util import instantiate_from_config @@ -430,6 +431,13 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_ try: with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd): sd_model = instantiate_from_config(sd_config.model) + + if shared.cmd_opts.token_merging: + ratio = shared.cmd_opts.token_merging_ratio + + tomesd.apply_patch(sd_model, ratio=ratio) + print(f"Model accelerated using {(ratio * 100)}% token merging via tomesd.") + timer.record("token merging") except Exception as e: pass -- cgit v1.2.1 From ef8c0440512f2fae9ceeb977b73f881c0afbb90a Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Sat, 1 Apr 2023 03:21:23 -0500 Subject: forgot to add reinstall arg back earlier since args moved out of shared --- modules/cmd_args.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 4314f97b..80df9465 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -105,3 +105,4 @@ parser.add_argument("--no-download-sd-model", action='store_true', help="don't d # token merging / tomesd parser.add_argument("--token-merging", action='store_true', help="Provides generation speedup by merging redundant tokens. (compatible with --xformers)", default=False) parser.add_argument("--token-merging-ratio", type=float, help="Adjusts ratio of merged to untouched tokens. Range: (0.0-1.0], Defaults to 0.5", default=0.5) +parser.add_argument("--reinstall-tomesd", action='store_true', help="Reinstalls tomesd", default=False) -- cgit v1.2.1 From 26ab018253cb078630fcdde47dbaee85f2466145 Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Sat, 1 Apr 2023 03:31:22 -0500 Subject: delay import --- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/sd_models.py b/modules/sd_models.py index 0b74aa0f..2c05ec17 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -9,7 +9,6 @@ from omegaconf import OmegaConf from os import mkdir from urllib import request import ldm.modules.midas as midas -import tomesd from ldm.util import instantiate_from_config @@ -433,6 +432,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_ sd_model = instantiate_from_config(sd_config.model) if shared.cmd_opts.token_merging: + import tomesd ratio = shared.cmd_opts.token_merging_ratio tomesd.apply_patch(sd_model, ratio=ratio) -- cgit v1.2.1 From 8c88bf40060c86ba508646c6d7ddc21e389be846 Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Sat, 1 Apr 2023 14:12:12 -0500 Subject: use pypi package for tomesd intead of manually cloning repo --- modules/cmd_args.py | 1 - 1 file changed, 1 deletion(-) (limited to 'modules') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 80df9465..4314f97b 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -105,4 +105,3 @@ parser.add_argument("--no-download-sd-model", action='store_true', help="don't d # token merging / tomesd parser.add_argument("--token-merging", action='store_true', help="Provides generation speedup by merging redundant tokens. 
(compatible with --xformers)", default=False) parser.add_argument("--token-merging-ratio", type=float, help="Adjusts ratio of merged to untouched tokens. Range: (0.0-1.0], Defaults to 0.5", default=0.5) -parser.add_argument("--reinstall-tomesd", action='store_true', help="Reinstalls tomesd", default=False) -- cgit v1.2.1 From a609bd56b4206460d1df3c3022025fc78b66718f Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Sat, 1 Apr 2023 22:18:35 -0500 Subject: Transition to using settings through UI instead of cmd line args. Added feature to only apply to hr-fix. Install package using requirements_versions.txt --- modules/processing.py | 35 +++++++++++++++++++++++++++++++++++ modules/sd_models.py | 7 ------- modules/shared.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 79 insertions(+), 7 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..e115aadd 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -29,6 +29,7 @@ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType +import tomesd # some of those options should not be changed at all because they would break the model, so I removed them from options. opt_C = 4 @@ -500,9 +501,28 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() + if opts.token_merging: + + if p.hr_second_pass_steps < 1 and not opts.token_merging_hr_only: + tomesd.apply_patch( + p.sd_model, + ratio=opts.token_merging_ratio, + max_downsample=opts.token_merging_maximum_down_sampling, + sx=opts.token_merging_stride_x, + sy=opts.token_merging_stride_y, + use_rand=opts.token_merging_random, + merge_attn=opts.token_merging_merge_attention, + merge_crossattn=opts.token_merging_merge_cross_attention, + merge_mlp=opts.token_merging_merge_mlp + ) + res = process_images_inner(p) finally: + # undo model optimizations made by tomesd + if opts.token_merging: + tomesd.remove_patch(p.sd_model) + # restore opts to original state if p.override_settings_restore_afterwards: for k, v in stored_opts.items(): @@ -938,6 +958,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x = None devices.torch_gc() + # apply token merging optimizations from tomesd for high-res pass + # check if hr_only so we don't redundantly apply patch + if opts.token_merging and opts.token_merging_hr_only: + tomesd.apply_patch( + self.sd_model, + ratio=opts.token_merging_ratio, + max_downsample=opts.token_merging_maximum_down_sampling, + sx=opts.token_merging_stride_x, + sy=opts.token_merging_stride_y, + use_rand=opts.token_merging_random, + merge_attn=opts.token_merging_merge_attention, + merge_crossattn=opts.token_merging_merge_cross_attention, + merge_mlp=opts.token_merging_merge_mlp + ) + samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) return samples diff --git a/modules/sd_models.py b/modules/sd_models.py index 2c05ec17..87c49b83 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -431,13 +431,6 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_ with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd): sd_model = instantiate_from_config(sd_config.model) - if shared.cmd_opts.token_merging: - import tomesd - ratio = 
shared.cmd_opts.token_merging_ratio - - tomesd.apply_patch(sd_model, ratio=ratio) - print(f"Model accelerated using {(ratio * 100)}% token merging via tomesd.") - timer.record("token merging") except Exception as e: pass diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..d7379e24 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -427,6 +427,50 @@ options_templates.update(options_section((None, "Hidden options"), { "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), })) +options_templates.update(options_section(('token_merging', 'Token Merging'), { + "token_merging": OptionInfo( + False, "Enable redundant token merging via tomesd. (currently incompatible with controlnet extension)", + gr.Checkbox + ), + "token_merging_ratio": OptionInfo( + 0.5, "Merging Ratio", + gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} + ), + "token_merging_hr_only": OptionInfo( + True, "Apply only to high-res fix pass. Disabling can yield a ~20-35% speedup on contemporary resolutions.", + gr.Checkbox + ), + # More advanced/niche settings: + "token_merging_random": OptionInfo( + True, "Use random perturbations - Disabling might help with certain samplers", + gr.Checkbox + ), + "token_merging_merge_attention": OptionInfo( + True, "Merge attention", + gr.Checkbox + ), + "token_merging_merge_cross_attention": OptionInfo( + False, "Merge cross attention", + gr.Checkbox + ), + "token_merging_merge_mlp": OptionInfo( + False, "Merge mlp", + gr.Checkbox + ), + "token_merging_maximum_down_sampling": OptionInfo( + 1, "Maximum down sampling", + gr.Dropdown, lambda: {"choices": ["1", "2", "4", "8"]} + ), + "token_merging_stride_x": OptionInfo( + 2, "Stride - X", + gr.Slider, {"minimum": 2, "maximum": 8, "step": 2} + ), + "token_merging_stride_y": OptionInfo( + 2, "Stride - Y", + gr.Slider, {"minimum": 2, "maximum": 8, "step": 2} + ) +})) + options_templates.update() -- cgit v1.2.1 From c707b7df95a61b66a05be94e805e1be9a432e294 Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Sat, 1 Apr 2023 23:47:10 -0500 Subject: remove excess condition --- modules/processing.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index e115aadd..55735572 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -501,26 +501,26 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if opts.token_merging: - - if p.hr_second_pass_steps < 1 and not opts.token_merging_hr_only: - tomesd.apply_patch( - p.sd_model, - ratio=opts.token_merging_ratio, - max_downsample=opts.token_merging_maximum_down_sampling, - sx=opts.token_merging_stride_x, - sy=opts.token_merging_stride_y, - use_rand=opts.token_merging_random, - merge_attn=opts.token_merging_merge_attention, - merge_crossattn=opts.token_merging_merge_cross_attention, - merge_mlp=opts.token_merging_merge_mlp - ) + if opts.token_merging and not opts.token_merging_hr_only: + print("applying token merging to all passes") + tomesd.apply_patch( + p.sd_model, + ratio=opts.token_merging_ratio, + max_downsample=opts.token_merging_maximum_down_sampling, + sx=opts.token_merging_stride_x, + sy=opts.token_merging_stride_y, + use_rand=opts.token_merging_random, + merge_attn=opts.token_merging_merge_attention, + merge_crossattn=opts.token_merging_merge_cross_attention, + merge_mlp=opts.token_merging_merge_mlp + ) res = process_images_inner(p) finally: # undo model optimizations 
made by tomesd if opts.token_merging: + print('removing token merging model optimizations') tomesd.remove_patch(p.sd_model) # restore opts to original state @@ -961,6 +961,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): # apply token merging optimizations from tomesd for high-res pass # check if hr_only so we don't redundantly apply patch if opts.token_merging and opts.token_merging_hr_only: + print("applying token merging for high-res pass") tomesd.apply_patch( self.sd_model, ratio=opts.token_merging_ratio, -- cgit v1.2.1 From d132481058f8a827cd407f2121f128a2bb862f7a Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 17:41:55 -0500 Subject: Embed model merge metadata in .safetensors file --- modules/extras.py | 44 ++++++++++++++++++++++++++++++++++++++++++-- modules/sd_models.py | 11 ++++++++++- modules/ui.py | 4 +++- 3 files changed, 55 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/extras.py b/modules/extras.py index d8ece955..77d88592 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -1,6 +1,7 @@ import os import re import shutil +import json import torch @@ -71,7 +72,7 @@ def to_half(tensor, enable): return tensor -def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights): +def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, save_metadata): shared.state.begin() shared.state.job = 'model-merge' @@ -241,13 +242,52 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ shared.state.textinfo = "Saving" print(f"Saving to {output_modelname}...") + metadata = {"format": "pt", "models": {}, "merge_recipe": None} + + if save_metadata: + merge_recipe = { + "primary_model_hash": primary_model_info.sha256, + "secondary_model_hash": secondary_model_info.sha256 if secondary_model_info else None, + "tertiary_model_hash": tertiary_model_info.sha256 if tertiary_model_info else None, + "interp_method": interp_method, + "multiplier": multiplier, + "save_as_half": save_as_half, + "custom_name": custom_name, + "config_source": config_source, + "bake_in_vae": bake_in_vae, + "discard_weights": discard_weights, + "is_inpainting": result_is_inpainting_model, + "is_instruct_pix2pix": result_is_instruct_pix2pix_model + } + metadata["merge_recipe"] = json.dumps(merge_recipe) + + def add_model_metadata(checkpoint_info): + metadata["models"][checkpoint_info.sha256] = { + "name": checkpoint_info.name, + "legacy_hash": checkpoint_info.hash, + "merge_recipe": checkpoint_info.metadata.get("merge_recipe", None) + } + + metadata["models"].update(checkpoint_info.metadata.get("models", {})) + + add_model_metadata(primary_model_info) + if secondary_model_info: + add_model_metadata(secondary_model_info) + if tertiary_model_info: + add_model_metadata(tertiary_model_info) + + metadata["models"] = json.dumps(metadata["models"]) + _, extension = os.path.splitext(output_modelname) if extension.lower() == ".safetensors": - safetensors.torch.save_file(theta_0, output_modelname, metadata={"format": "pt"}) + safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata) else: torch.save(theta_0, output_modelname) sd_models.list_models() + created_model = next((ckpt for 
ckpt in sd_models.checkpoints_list.values() if ckpt.name == filename), None) + if created_model: + created_model.calculate_shorthash() create_config(output_modelname, config_source, primary_model_info, secondary_model_info, tertiary_model_info) diff --git a/modules/sd_models.py b/modules/sd_models.py index 6ea874df..4f7613a1 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -52,6 +52,15 @@ class CheckpointInfo: self.ids = [self.hash, self.model_name, self.title, name, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else []) + self.metadata = {} + + _, ext = os.path.splitext(self.filename) + if ext.lower() == ".safetensors": + try: + self.metadata = read_metadata_from_safetensors(filename) + except Exception as e: + errors.display(e, f"reading checkpoint metadata: {filename}") + def register(self): checkpoints_list[self.title] = self for id in self.ids: @@ -544,4 +553,4 @@ def unload_model_weights(sd_model=None, info=None): print(f"Unloaded weights {timer.summary()}.") - return sd_model \ No newline at end of file + return sd_model diff --git a/modules/ui.py b/modules/ui.py index 627fbe0b..64fb93c3 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1019,8 +1019,9 @@ def create_ui(): interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description]) with FormRow(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") + checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="safetensors", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") + save_metadata = gr.Checkbox(value=True, label="Save metadata (.safetensors only)", elem_id="modelmerger_save_metadata") with FormRow(): with gr.Column(): @@ -1658,6 +1659,7 @@ def create_ui(): config_source, bake_in_vae, discard_weights, + save_metadata, ], outputs=[ primary_model_name, -- cgit v1.2.1 From afc349c2c0d7c7543e8cc085cde2beef8549fffc Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 18:40:33 -0500 Subject: Add field for model merge type Incase this is supported by other merge extensions --- modules/extras.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/extras.py b/modules/extras.py index 77d88592..9a00c9a3 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -246,6 +246,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ if save_metadata: merge_recipe = { + "type": "webui", # indicate this model was merged with webui's built-in merger "primary_model_hash": primary_model_info.sha256, "secondary_model_hash": secondary_model_info.sha256 if secondary_model_info else None, "tertiary_model_hash": tertiary_model_info.sha256 if tertiary_model_info else None, -- cgit v1.2.1 From 7c016dd642cc29e064715ac04ab3d83c5451b45e Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 19:06:39 -0500 Subject: Calculate shorthash on merge if not exist --- modules/extras.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/extras.py b/modules/extras.py index 9a00c9a3..97d14e5a 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -263,6 +263,7 @@ def run_modelmerger(id_task, primary_model_name, 
secondary_model_name, tertiary_ metadata["merge_recipe"] = json.dumps(merge_recipe) def add_model_metadata(checkpoint_info): + checkpoint_info.calculate_shorthash() metadata["models"][checkpoint_info.sha256] = { "name": checkpoint_info.name, "legacy_hash": checkpoint_info.hash, -- cgit v1.2.1 From 9a4e650800adc444c07a48572a958a71c2144c15 Mon Sep 17 00:00:00 2001 From: Pluventi Date: Mon, 3 Apr 2023 03:32:48 +0200 Subject: Update postprocessing.py Solution for anyone getting an error when batching on extras, even with a clean install of "stable diffusion webui" --- modules/postprocessing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 09d8e605..63b9caf8 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -18,7 +18,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if extras_mode == 1: for img in image_folder: - image = Image.open(img) + image = Image.open(img.name) image_data.append(image) image_names.append(os.path.splitext(img.orig_name)[0]) elif extras_mode == 2: -- cgit v1.2.1 From fbaf6e4fd897fa1f3e3f747f1d699c240cad76a0 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sun, 2 Apr 2023 21:41:23 -0500 Subject: Namespace metadata fields --- modules/extras.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/extras.py b/modules/extras.py index 97d14e5a..ff4e9c4e 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -242,7 +242,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ shared.state.textinfo = "Saving" print(f"Saving to {output_modelname}...") - metadata = {"format": "pt", "models": {}, "merge_recipe": None} + metadata = {"format": "pt", "sd_merge_models": {}, "sd_merge_recipe": None} if save_metadata: merge_recipe = { @@ -260,17 +260,17 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ "is_inpainting": result_is_inpainting_model, "is_instruct_pix2pix": result_is_instruct_pix2pix_model } - metadata["merge_recipe"] = json.dumps(merge_recipe) + metadata["sd_merge_recipe"] = json.dumps(merge_recipe) def add_model_metadata(checkpoint_info): checkpoint_info.calculate_shorthash() - metadata["models"][checkpoint_info.sha256] = { + metadata["sd_merge_models"][checkpoint_info.sha256] = { "name": checkpoint_info.name, "legacy_hash": checkpoint_info.hash, - "merge_recipe": checkpoint_info.metadata.get("merge_recipe", None) + "sd_merge_recipe": checkpoint_info.metadata.get("sd_merge_recipe", None) } - metadata["models"].update(checkpoint_info.metadata.get("models", {})) + metadata["sd_merge_models"].update(checkpoint_info.metadata.get("sd_merge_models", {})) add_model_metadata(primary_model_info) if secondary_model_info: @@ -278,7 +278,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ if tertiary_model_info: add_model_metadata(tertiary_model_info) - metadata["models"] = json.dumps(metadata["models"]) + metadata["sd_merge_models"] = json.dumps(metadata["sd_merge_models"]) _, extension = os.path.splitext(output_modelname) if extension.lower() == ".safetensors": -- cgit v1.2.1 From f7215906af573f6e60b006cd430e82691ba4f8d6 Mon Sep 17 00:00:00 2001 From: gmasil <54176035+gmasil@users.noreply.github.com> Date: Mon, 3 Apr 2023 18:19:57 +0200 Subject: allow styles.csv to be symlinked or mounted in docker without moving the file around 
--- modules/styles.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'modules') diff --git a/modules/styles.py b/modules/styles.py index 990d5623..9ed85991 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -72,16 +72,14 @@ class StyleDatabase: return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]) def save_styles(self, path: str) -> None: - # Write to temporary file first, so we don't nuke the file if something goes wrong - fd, temp_path = tempfile.mkstemp(".csv") + # Always keep a backup file around + if os.path.exists(path): + shutil.copy(path, path + ".bak") + + fd = os.open(path, os.O_RDWR|os.O_CREAT) with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file: # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple, # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict() writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) writer.writeheader() writer.writerows(style._asdict() for k, style in self.styles.items()) - - # Always keep a backup file around - if os.path.exists(path): - shutil.move(path, path + ".bak") - shutil.move(temp_path, path) -- cgit v1.2.1 From 5ebe3b25044efac959c4d8f208dd17d6f1e5ce8a Mon Sep 17 00:00:00 2001 From: Nathanael Santoso <73165142+nart4hire@users.noreply.github.com> Date: Tue, 4 Apr 2023 06:50:29 +0000 Subject: Added guard clause to prevent multiple tunnel creations --- modules/ngrok.py | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'modules') diff --git a/modules/ngrok.py b/modules/ngrok.py index 3df2c06b..90268bf1 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -1,6 +1,14 @@ from pyngrok import ngrok, conf, exception def connect(token, port, region): + # Guard for existing tunnels + existing = ngrok.get_tunnels() + if existing: + public_url = existing[0].public_url + print(f'ngrok connected to localhost:{port}! URL: {public_url}\n' + 'You can use this link after the launch is complete.') + return + account = None if token is None: token = 'None' -- cgit v1.2.1 From 2edf73b38fbea4ce67c38edafea8a8983b9435b5 Mon Sep 17 00:00:00 2001 From: Nathanael Santoso <73165142+nart4hire@users.noreply.github.com> Date: Tue, 4 Apr 2023 06:57:39 +0000 Subject: Improved message clarity --- modules/ngrok.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/ngrok.py b/modules/ngrok.py index 90268bf1..5302b6a8 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -5,7 +5,7 @@ def connect(token, port, region): existing = ngrok.get_tunnels() if existing: public_url = existing[0].public_url - print(f'ngrok connected to localhost:{port}! URL: {public_url}\n' + print(f'ngrok has already been connected to localhost:{port}! URL: {public_url}\n' 'You can use this link after the launch is complete.') return -- cgit v1.2.1 From 5c8e53d5e98da0eabf384318955c57842d612c07 Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Tue, 4 Apr 2023 02:26:44 -0500 Subject: Allow different merge ratios to be used for each pass. Make toggle cmd flag work again. Remove ratio flag. 
Remove warning about controlnet being incompatible --- modules/cmd_args.py | 3 +-- modules/processing.py | 44 +++++++++++++++----------------------------- modules/sd_models.py | 29 ++++++++++++++++++++++++++++- modules/shared.py | 6 +++++- 4 files changed, 49 insertions(+), 33 deletions(-) (limited to 'modules') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 4314f97b..8e5a7fab 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -103,5 +103,4 @@ parser.add_argument("--no-hashing", action='store_true', help="disable sha256 ha parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) # token merging / tomesd -parser.add_argument("--token-merging", action='store_true', help="Provides generation speedup by merging redundant tokens. (compatible with --xformers)", default=False) -parser.add_argument("--token-merging-ratio", type=float, help="Adjusts ratio of merged to untouched tokens. Range: (0.0-1.0], Defaults to 0.5", default=0.5) +parser.add_argument("--token-merging", action='store_true', help="Provides speed and memory improvements by merging redundant tokens. This has a more pronounced effect on higher resolutions.", default=False) diff --git a/modules/processing.py b/modules/processing.py index 55735572..670a7a28 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -501,26 +501,16 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if opts.token_merging and not opts.token_merging_hr_only: - print("applying token merging to all passes") - tomesd.apply_patch( - p.sd_model, - ratio=opts.token_merging_ratio, - max_downsample=opts.token_merging_maximum_down_sampling, - sx=opts.token_merging_stride_x, - sy=opts.token_merging_stride_y, - use_rand=opts.token_merging_random, - merge_attn=opts.token_merging_merge_attention, - merge_crossattn=opts.token_merging_merge_cross_attention, - merge_mlp=opts.token_merging_merge_mlp - ) + if (opts.token_merging or cmd_opts.token_merging) and not opts.token_merging_hr_only: + print("\nApplying token merging\n") + sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) res = process_images_inner(p) finally: # undo model optimizations made by tomesd - if opts.token_merging: - print('removing token merging model optimizations') + if opts.token_merging or cmd_opts.token_merging: + print('\nRemoving token merging model optimizations\n') tomesd.remove_patch(p.sd_model) # restore opts to original state @@ -959,20 +949,16 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): devices.torch_gc() # apply token merging optimizations from tomesd for high-res pass - # check if hr_only so we don't redundantly apply patch - if opts.token_merging and opts.token_merging_hr_only: - print("applying token merging for high-res pass") - tomesd.apply_patch( - self.sd_model, - ratio=opts.token_merging_ratio, - max_downsample=opts.token_merging_maximum_down_sampling, - sx=opts.token_merging_stride_x, - sy=opts.token_merging_stride_y, - use_rand=opts.token_merging_random, - merge_attn=opts.token_merging_merge_attention, - merge_crossattn=opts.token_merging_merge_cross_attention, - merge_mlp=opts.token_merging_merge_mlp - ) + # check if hr_only so we are not redundantly patching + if (cmd_opts.token_merging or opts.token_merging) and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): + # case where user wants to use separate merge 
ratios + if not opts.token_merging_hr_only: + # clean patch done by first pass. (clobbering the first patch might be fine? this might be excessive) + print('Temporarily reverting token merging optimizations in preparation for next pass') + tomesd.remove_patch(self.sd_model) + + print("\nApplying token merging for high-res pass\n") + sd_models.apply_token_merging(sd_model=self.sd_model, hr=True) samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) diff --git a/modules/sd_models.py b/modules/sd_models.py index 87c49b83..696a2333 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -16,6 +16,7 @@ from modules import paths, shared, modelloader, devices, script_callbacks, sd_va from modules.paths import models_path from modules.sd_hijack_inpainting import do_inpainting_hijack from modules.timer import Timer +import tomesd model_dir = "Stable-diffusion" model_path = os.path.abspath(os.path.join(paths.models_path, model_dir)) @@ -545,4 +546,30 @@ def unload_model_weights(sd_model=None, info=None): print(f"Unloaded weights {timer.summary()}.") - return sd_model \ No newline at end of file + return sd_model + + +def apply_token_merging(sd_model, hr: bool): + """ + Applies speed and memory optimizations from tomesd. + + Args: + hr (bool): True if called in the context of a high-res pass + """ + + ratio = shared.opts.token_merging_ratio + if hr: + ratio = shared.opts.token_merging_ratio_hr + print("effective hr pass merge ratio is "+str(ratio)) + + tomesd.apply_patch( + sd_model, + ratio=ratio, + max_downsample=shared.opts.token_merging_maximum_down_sampling, + sx=shared.opts.token_merging_stride_x, + sy=shared.opts.token_merging_stride_y, + use_rand=shared.opts.token_merging_random, + merge_attn=shared.opts.token_merging_merge_attention, + merge_crossattn=shared.opts.token_merging_merge_cross_attention, + merge_mlp=shared.opts.token_merging_merge_mlp + ) diff --git a/modules/shared.py b/modules/shared.py index d7379e24..c7572e98 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -429,7 +429,7 @@ options_templates.update(options_section((None, "Hidden options"), { options_templates.update(options_section(('token_merging', 'Token Merging'), { "token_merging": OptionInfo( - False, "Enable redundant token merging via tomesd. (currently incompatible with controlnet extension)", + 0.5, "Enable redundant token merging via tomesd. This can provide significant speed and memory improvements.", gr.Checkbox ), "token_merging_ratio": OptionInfo( @@ -440,6 +440,10 @@ options_templates.update(options_section(('token_merging', 'Token Merging'), { True, "Apply only to high-res fix pass. 
Disabling can yield a ~20-35% speedup on contemporary resolutions.", gr.Checkbox ), + "token_merging_ratio_hr": OptionInfo( + 0.5, "Merging Ratio (high-res pass) - If 'Apply only to high-res' is enabled, this will always be the ratio used.", + gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} + ), # More advanced/niche settings: "token_merging_random": OptionInfo( True, "Use random perturbations - Disabling might help with certain samplers", -- cgit v1.2.1 From cf5a5773bfd1ca6a3c35232881661ec1ff4b67d7 Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Tue, 4 Apr 2023 02:39:13 -0500 Subject: :p --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index c7572e98..568acdc4 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -429,7 +429,7 @@ options_templates.update(options_section((None, "Hidden options"), { options_templates.update(options_section(('token_merging', 'Token Merging'), { "token_merging": OptionInfo( - 0.5, "Enable redundant token merging via tomesd. This can provide significant speed and memory improvements.", + False, "Enable redundant token merging via tomesd. This can provide significant speed and memory improvements.", gr.Checkbox ), "token_merging_ratio": OptionInfo( -- cgit v1.2.1 From 3158d17ccf6adfe974691f46526773df2f4028f5 Mon Sep 17 00:00:00 2001 From: Nathanael Santoso <73165142+nart4hire@users.noreply.github.com> Date: Tue, 4 Apr 2023 07:41:55 +0000 Subject: fixed an issue with using ngrok for other connections and also ngrok not using auth_token --- modules/ngrok.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/ngrok.py b/modules/ngrok.py index 5302b6a8..1ad7989b 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -1,14 +1,6 @@ from pyngrok import ngrok, conf, exception def connect(token, port, region): - # Guard for existing tunnels - existing = ngrok.get_tunnels() - if existing: - public_url = existing[0].public_url - print(f'ngrok has already been connected to localhost:{port}! URL: {public_url}\n' - 'You can use this link after the launch is complete.') - return - account = None if token is None: token = 'None' @@ -21,6 +13,18 @@ def connect(token, port, region): config = conf.PyngrokConfig( auth_token=token, region=region ) + + # Guard for existing tunnels + existing = ngrok.get_tunnels(pyngrok_config=config) + if existing: + for established in existing: + # Extra configuration in the case that the user is also using ngrok for other tunnels + if established.config['addr'][-4:] == str(port): + public_url = existing[0].public_url + print(f'ngrok has already been connected to localhost:{port}! 
URL: {public_url}\n' + 'You can use this link after the launch is complete.') + return + try: if account is None: public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True).public_url -- cgit v1.2.1 From 539a69860bafe1c922029722a50462f3ef0db5e4 Mon Sep 17 00:00:00 2001 From: hitomi Date: Sat, 25 Mar 2023 13:58:53 -0700 Subject: fix `--realesrgan-models-path` not working --- modules/realesrgan_model.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index aad4a629..d6079433 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -9,7 +9,7 @@ from realesrgan import RealESRGANer from modules.upscaler import Upscaler, UpscalerData from modules.shared import cmd_opts, opts - +from modules import modelloader class UpscalerRealESRGAN(Upscaler): def __init__(self, path): @@ -23,7 +23,15 @@ class UpscalerRealESRGAN(Upscaler): self.enable = True self.scalers = [] scalers = self.load_models(path) + + local_model_paths = self.find_models(ext_filter=[".pth"]) for scaler in scalers: + if scaler.local_data_path.startswith("http"): + filename = modelloader.friendly_name(scaler.local_data_path) + local = next(iter([local_model for local_model in local_model_paths if local_model.endswith(filename + '.pth')]), None) + if local: + scaler.local_data_path = local + if scaler.name in opts.realesrgan_enabled_models: self.scalers.append(scaler) @@ -64,7 +72,9 @@ class UpscalerRealESRGAN(Upscaler): print(f"Unable to find model info: {path}") return None - info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_path, progress=True) + if info.local_data_path.startswith("http"): + info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_path, progress=True) + return info except Exception as e: print(f"Error making Real-ESRGAN models list: {e}", file=sys.stderr) -- cgit v1.2.1 From 52a8f286ef99bb5004bea2b099a7bcbb073b638f Mon Sep 17 00:00:00 2001 From: Andre Ubuntu Date: Wed, 5 Apr 2023 20:28:00 -0300 Subject: fix preprocess orientation --- modules/textual_inversion/preprocess.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 2239cb84..9cb98694 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -161,7 +161,25 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre params.subindex = 0 filename = os.path.join(src, imagefile) try: - img = Image.open(filename).convert("RGB") + img = Image.open(filename) + # make sure to rotate the image according to EXIF data of the original image + # ImageOps.exif_transpose(img) # doesn't work for some reason + EXIF = img._getexif() + # rotate the image according to the EXIF data + try: + if EXIF[274] == 3: + # print("Rotating image by 180 degrees") + img = img.rotate(180, expand=True) + elif EXIF[274] == 6: + # print("Rotating image by 270 degrees") + img = img.rotate(270, expand=True) + elif EXIF[274] == 8: + # print("Rotating image by 90 degrees") + img = img.rotate(90, expand=True) + except: + pass + # print("No EXIF data found for image: " + filename) + img = img.convert("RGB") except Exception: continue -- cgit v1.2.1 From 48c06af8dc718abf0bf9355ea5548c6a66e0b1e6 Mon Sep 17 00:00:00 2001 From: Andre Ubuntu Date: Wed, 5 Apr 2023 20:51:29 -0300 Subject: 
Pythonic way to achieve it --- modules/textual_inversion/preprocess.py | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) (limited to 'modules') diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 9cb98694..de1ddb59 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -162,23 +162,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre filename = os.path.join(src, imagefile) try: img = Image.open(filename) - # make sure to rotate the image according to EXIF data of the original image - # ImageOps.exif_transpose(img) # doesn't work for some reason - EXIF = img._getexif() - # rotate the image according to the EXIF data - try: - if EXIF[274] == 3: - # print("Rotating image by 180 degrees") - img = img.rotate(180, expand=True) - elif EXIF[274] == 6: - # print("Rotating image by 270 degrees") - img = img.rotate(270, expand=True) - elif EXIF[274] == 8: - # print("Rotating image by 90 degrees") - img = img.rotate(90, expand=True) - except: - pass - # print("No EXIF data found for image: " + filename) + img = ImageOps.exif_transpose(img) img = img.convert("RGB") except Exception: continue -- cgit v1.2.1 From b3593d0997bfdcca7f8aa01663e81720db50e494 Mon Sep 17 00:00:00 2001 From: For Sure Date: Thu, 6 Apr 2023 19:42:26 +0300 Subject: Add support for saving init images in img2img --- modules/processing.py | 8 ++++++++ modules/shared.py | 3 +++ 2 files changed, 11 insertions(+) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..5556afc5 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -3,6 +3,7 @@ import math import os import sys import warnings +import hashlib import torch import numpy as np @@ -476,6 +477,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "Init image hash": getattr(p, 'init_img_hash', None) } generation_params.update(p.extra_generation_params) @@ -1007,6 +1009,12 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.color_corrections = [] imgs = [] for img in self.init_images: + + # Save init image + if opts.save_init_img: + self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest() + images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False) + image = images.flatten(img, opts.img2img_background_color) if crop_region is None and self.resize_mode != 3: diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..69c2b21e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -39,6 +39,7 @@ restricted_opts = { "outdir_grids", "outdir_txt2img_grids", "outdir_save", + "outdir_init_images" } ui_reorder_categories = [ @@ -253,6 +254,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), + "save_init_img": OptionInfo(True, "Save init images when using img2img"), 
"temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), @@ -268,6 +270,7 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), { "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs), + "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), })) options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { -- cgit v1.2.1 From 63a6f9b4d98a192bb359910cb284cf00582baabf Mon Sep 17 00:00:00 2001 From: forsurefr <67145502+forsurefr@users.noreply.github.com> Date: Fri, 7 Apr 2023 12:13:51 +0300 Subject: Do not save init image by default --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 69c2b21e..c5a1b5ad 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -254,7 +254,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), - "save_init_img": OptionInfo(True, "Save init images when using img2img"), + "save_init_img": OptionInfo(False, "Save init images when using img2img"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), -- cgit v1.2.1 From d609f6030ec464b371a899ced366c62bbd9a4a91 Mon Sep 17 00:00:00 2001 From: gk Date: Fri, 7 Apr 2023 21:04:46 +0900 Subject: Add [batch_number] and [generation_number] filename patterns --- modules/images.py | 6 ++++++ modules/processing.py | 4 +++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index b3535070..5c0cf1d8 100644 --- a/modules/images.py +++ b/modules/images.py @@ -352,6 +352,8 @@ class FilenameGenerator: 'prompt_no_styles': lambda self: self.prompt_no_style(), 'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False), 'prompt_words': lambda self: self.prompt_words(), + 'batch_number': lambda self: self.p.batch_index + 1, + 'generation_number': lambda self: self.p.iteration * self.p.batch_size + self.p.batch_index + 1, } default_time_format = '%Y%m%d%H%M%S' @@ -403,6 +405,10 @@ class FilenameGenerator: for m in re_pattern.finditer(x): text, pattern = m.groups() + + if pattern is not None and (pattern.lower() == 'batch_number' and self.p.batch_size == 1 or pattern.lower() == 'generation_number' and self.p.n_iter == 1 and self.p.batch_size == 1): + continue + res += text if pattern is None: diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..0e6a60ba 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -670,6 +670,8 @@ def process_images_inner(p: 
StableDiffusionProcessing) -> Processed: p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n) for i, x_sample in enumerate(x_samples_ddim): + p.batch_index = i + x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) @@ -718,7 +720,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if opts.return_mask: output_images.append(image_mask) - + if opts.return_mask_composite: output_images.append(image_mask_composite) -- cgit v1.2.1 From 27b9ec60e4ede748ec23615fecddb70e48daa623 Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Sat, 8 Apr 2023 15:58:00 -0400 Subject: sort embeddings by name (case insensitive) --- modules/textual_inversion/textual_inversion.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index d2e62e58..7c50839f 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -2,7 +2,7 @@ import os import sys import traceback import inspect -from collections import namedtuple +from collections import namedtuple, OrderedDict import torch import tqdm @@ -108,7 +108,7 @@ class DirWithTextualInversionEmbeddings: class EmbeddingDatabase: def __init__(self): self.ids_lookup = {} - self.word_embeddings = {} + self.word_embeddings = OrderedDict() self.skipped_embeddings = {} self.expected_shape = -1 self.embedding_dirs = {} @@ -233,6 +233,9 @@ class EmbeddingDatabase: self.load_from_dir(embdir) embdir.update() + # re-sort word_embeddings because load_from_dir may not load in alphabetic order. + self.word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys())) if self.previously_displayed_embeddings != displayed_embeddings: self.previously_displayed_embeddings = displayed_embeddings -- cgit v1.2.1 From 1aba8d82cbb816a755d012c5c729d8bafeb1b8ed Mon Sep 17 00:00:00 2001 From: yike5460 Date: Sun, 9 Apr 2023 22:22:43 +0800 Subject: feat: add branch support for extension installation --- modules/ui_extensions.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index efd6cda2..d9487f83 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -129,7 +129,7 @@ def normalize_git_url(url): return url -def install_extension_from_url(dirname, url): +def install_extension_from_url(dirname, branch_name, url): check_access() assert url, 'No URL specified' @@ -150,7 +150,7 @@ def install_extension_from_url(dirname, url): try: shutil.rmtree(tmpdir, True) - with git.Repo.clone_from(url, tmpdir) as repo: + with git.Repo.clone_from(url, tmpdir, branch=branch_name if branch_name else '') as repo: repo.remote().fetch() for submodule in repo.submodules: submodule.update() @@ -376,13 +376,14 @@ def create_ui(): with gr.TabItem("Install from URL"): install_url = gr.Text(label="URL for extension's git repository") + install_branch = gr.Text(label="Branch name for extension's git repository", placeholder="Leave empty for default branch") install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") install_button = gr.Button(value="Install", variant="primary") install_result = gr.HTML(elem_id="extension_install_result") install_button.click( 
fn=modules.ui.wrap_gradio_call(install_extension_from_url, extra_outputs=[gr.update()]), - inputs=[install_dirname, install_url], + inputs=[install_dirname, install_branch, install_url], outputs=[extensions_table, install_result], ) -- cgit v1.2.1 From c19618f37059b425b1e53429ad8def2caa78cdec Mon Sep 17 00:00:00 2001 From: Ilya Khadykin Date: Sun, 9 Apr 2023 21:33:09 +0200 Subject: fix(extras): fix batch image processing on 'Extras\Batch Process' tab This change fixes an issue where an incorrect type was passed to the PIL.Image.open() function that caused the whole process to fail. Scope of this change is limited to only batch image processing, and it shouldn't affect other functionality. --- modules/postprocessing.py | 6 ++++-- modules/ui_postprocessing.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 09d8e605..c27ad8db 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -1,4 +1,6 @@ import os +import tempfile +from typing import List from PIL import Image @@ -6,7 +8,7 @@ from modules import shared, images, devices, scripts, scripts_postprocessing, ui from modules.shared import opts -def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True): +def run_postprocessing(extras_mode, image, image_folder: List[tempfile.NamedTemporaryFile], input_dir, output_dir, show_extras_results, *args, save_output: bool = True): devices.torch_gc() shared.state.begin() @@ -18,7 +20,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if extras_mode == 1: for img in image_folder: - image = Image.open(img) + image = Image.open(os.path.abspath(img.name)) image_data.append(image) image_names.append(os.path.splitext(img.orig_name)[0]) elif extras_mode == 2: diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index b418d955..d278e1b6 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -13,7 +13,7 @@ def create_ui(): extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") with gr.TabItem('Batch Process', elem_id="extras_batch_process_tab") as tab_batch: - image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file", elem_id="extras_image_batch") + image_batch = gr.Files(label="Batch Process", interactive=True, elem_id="extras_image_batch") with gr.TabItem('Batch from Directory', elem_id="extras_batch_directory_tab") as tab_batch_dir: extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir") -- cgit v1.2.1 From 7c62bb2788d9cec10bab9d0154bd24f3658f7a83 Mon Sep 17 00:00:00 2001 From: yike5460 Date: Mon, 10 Apr 2023 09:38:26 +0800 Subject: fix: support for default branch --- modules/ui_extensions.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index d9487f83..b402bc8b 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -150,10 +150,17 @@ def install_extension_from_url(dirname, branch_name, url): try: shutil.rmtree(tmpdir, True) - with git.Repo.clone_from(url, tmpdir, branch=branch_name if branch_name else '') as repo: - repo.remote().fetch() - for submodule in repo.submodules: - 
submodule.update() + if branch_name == '': + # if no branch is specified, use the default branch + with git.Repo.clone_from(url, tmpdir) as repo: + repo.remote().fetch() + for submodule in repo.submodules: + submodule.update() + else: + with git.Repo.clone_from(url, tmpdir, branch=branch_name) as repo: + repo.remote().fetch() + for submodule in repo.submodules: + submodule.update() try: os.rename(tmpdir, target_dir) except OSError as err: @@ -376,7 +383,7 @@ def create_ui(): with gr.TabItem("Install from URL"): install_url = gr.Text(label="URL for extension's git repository") - install_branch = gr.Text(label="Branch name for extension's git repository", placeholder="Leave empty for default branch") + install_branch = gr.Text(label="Specific branch name", placeholder="Leave empty for default main branch") install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") install_button = gr.Button(value="Install", variant="primary") install_result = gr.HTML(elem_id="extension_install_result") -- cgit v1.2.1 From 1c1106260300ca3956d9619875e28278b148adab Mon Sep 17 00:00:00 2001 From: papuSpartan Date: Mon, 10 Apr 2023 03:37:15 -0500 Subject: add token merging options to infotext when necessary. Bump tomesd version --- modules/generation_parameters_copypaste.py | 37 ++++++++++++++++++++++++++++++ modules/processing.py | 22 ++++++++++++++---- modules/shared.py | 2 +- 3 files changed, 56 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 6df76858..a7ede534 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -282,6 +282,32 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Hires resize-1"] = 0 res["Hires resize-2"] = 0 + # Infer additional override settings for token merging + print("inferring settings for tomesd") + token_merging_ratio = res.get("Token merging ratio", None) + token_merging_ratio_hr = res.get("Token merging ratio hr", None) + + if token_merging_ratio is not None or token_merging_ratio_hr is not None: + res["Token merging"] = 'True' + + if token_merging_ratio is None: + res["Token merging hr only"] = 'True' + else: + res["Token merging hr only"] = 'False' + + if res.get("Token merging random", None) is None: + res["Token merging random"] = 'False' + if res.get("Token merging merge attention", None) is None: + res["Token merging merge attention"] = 'True' + if res.get("Token merging merge cross attention", None) is None: + res["Token merging merge cross attention"] = 'False' + if res.get("Token merging merge mlp", None) is None: + res["Token merging merge mlp"] = 'False' + if res.get("Token merging stride x", None) is None: + res["Token merging stride x"] = '2' + if res.get("Token merging stride y", None) is None: + res["Token merging stride y"] = '2' + restore_old_hires_fix_params(res) return res @@ -304,6 +330,17 @@ infotext_to_setting_name_mapping = [ ('UniPC skip type', 'uni_pc_skip_type'), ('UniPC order', 'uni_pc_order'), ('UniPC lower order final', 'uni_pc_lower_order_final'), + ('Token merging', 'token_merging'), + ('Token merging ratio', 'token_merging_ratio'), + ('Token merging hr only', 'token_merging_hr_only'), + ('Token merging ratio hr', 'token_merging_ratio_hr'), + ('Token merging random', 'token_merging_random'), + ('Token merging merge attention', 'token_merging_merge_attention'), + ('Token merging merge cross attention', 
'token_merging_merge_cross_attention'), + ('Token merging merge mlp', 'token_merging_merge_mlp'), + ('Token merging maximum downsampling', 'token_merging_maximum_downsampling'), + ('Token merging stride x', 'token_merging_stride_x'), + ('Token merging stride y', 'token_merging_stride_y') ] diff --git a/modules/processing.py b/modules/processing.py index 670a7a28..95058d0b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -31,6 +31,12 @@ from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType import tomesd +# add a logger for the processing module +logger = logging.getLogger(__name__) +# manually set output level here since there is no option to do so yet through launch options +# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') + + # some of those options should not be changed at all because they would break the model, so I removed them from options. opt_C = 4 opt_f = 8 @@ -477,6 +483,14 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "Token merging ratio": None if not (opts.token_merging or cmd_opts.token_merging) or opts.token_merging_hr_only else opts.token_merging_ratio, + "Token merging ratio hr": None if not (opts.token_merging or cmd_opts.token_merging) else opts.token_merging_ratio_hr, + "Token merging random": None if opts.token_merging_random is False else opts.token_merging_random, + "Token merging merge attention": None if opts.token_merging_merge_attention is True else opts.token_merging_merge_attention, + "Token merging merge cross attention": None if opts.token_merging_merge_cross_attention is False else opts.token_merging_merge_cross_attention, + "Token merging merge mlp": None if opts.token_merging_merge_mlp is False else opts.token_merging_merge_mlp, + "Token merging stride x": None if opts.token_merging_stride_x == 2 else opts.token_merging_stride_x, + "Token merging stride y": None if opts.token_merging_stride_y == 2 else opts.token_merging_stride_y } generation_params.update(p.extra_generation_params) @@ -502,16 +516,16 @@ def process_images(p: StableDiffusionProcessing) -> Processed: sd_vae.reload_vae_weights() if (opts.token_merging or cmd_opts.token_merging) and not opts.token_merging_hr_only: - print("\nApplying token merging\n") sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) + logger.debug('Token merging applied') res = process_images_inner(p) finally: # undo model optimizations made by tomesd if opts.token_merging or cmd_opts.token_merging: - print('\nRemoving token merging model optimizations\n') tomesd.remove_patch(p.sd_model) + logger.debug('Token merging model optimizations removed') # restore opts to original state if p.override_settings_restore_afterwards: @@ -954,11 +968,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): # case where user wants to use separate merge ratios if not opts.token_merging_hr_only: # clean patch done by first pass. (clobbering the first patch might be fine? 
this might be excessive) - print('Temporarily reverting token merging optimizations in preparation for next pass') tomesd.remove_patch(self.sd_model) + logger.debug('Temporarily removed token merging optimizations in preparation for next pass') - print("\nApplying token merging for high-res pass\n") sd_models.apply_token_merging(sd_model=self.sd_model, hr=True) + logger.debug('Applied token merging for high-res pass') samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) diff --git a/modules/shared.py b/modules/shared.py index 568acdc4..d9db7317 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -446,7 +446,7 @@ options_templates.update(options_section(('token_merging', 'Token Merging'), { ), # More advanced/niche settings: "token_merging_random": OptionInfo( - True, "Use random perturbations - Disabling might help with certain samplers", + False, "Use random perturbations - Can improve outputs for certain samplers. For others, it may cause visaul artifacting.", gr.Checkbox ), "token_merging_merge_attention": OptionInfo( -- cgit v1.2.1 From c510cfd24b995f8267df3391cc961ac398922ba4 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Mon, 10 Apr 2023 03:43:56 -0500 Subject: Update shared.py fix typo --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index d9db7317..edf11e43 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -446,7 +446,7 @@ options_templates.update(options_section(('token_merging', 'Token Merging'), { ), # More advanced/niche settings: "token_merging_random": OptionInfo( - False, "Use random perturbations - Can improve outputs for certain samplers. For others, it may cause visaul artifacting.", + False, "Use random perturbations - Can improve outputs for certain samplers. 
For others, it may cause visual artifacting.", gr.Checkbox ), "token_merging_merge_attention": OptionInfo( -- cgit v1.2.1 From a9902ca33119d6fae0a3623424bfc7ab86f2095a Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Mon, 10 Apr 2023 04:03:01 -0500 Subject: Update generation_parameters_copypaste.py --- modules/generation_parameters_copypaste.py | 1 - 1 file changed, 1 deletion(-) (limited to 'modules') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index a7ede534..ba2ca5ed 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -283,7 +283,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Hires resize-2"] = 0 # Infer additional override settings for token merging - print("inferring settings for tomesd") token_merging_ratio = res.get("Token merging ratio", None) token_merging_ratio_hr = res.get("Token merging ratio hr", None) -- cgit v1.2.1 From dff60e2e74964a8b02b75ecd8cf8007ef67a9712 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Mon, 10 Apr 2023 04:10:50 -0500 Subject: Update sd_models.py --- modules/sd_models.py | 1 - 1 file changed, 1 deletion(-) (limited to 'modules') diff --git a/modules/sd_models.py b/modules/sd_models.py index 696a2333..efcf730d 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -560,7 +560,6 @@ def apply_token_merging(sd_model, hr: bool): ratio = shared.opts.token_merging_ratio if hr: ratio = shared.opts.token_merging_ratio_hr - print("effective hr pass merge ratio is "+str(ratio)) tomesd.apply_patch( sd_model, -- cgit v1.2.1 From dab5002c59ce1f68deae5e6e0c03e5e2c27155db Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Thu, 13 Apr 2023 23:12:33 -0400 Subject: sort self.word_embeddings without instantiating it a new dict --- modules/textual_inversion/textual_inversion.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 7c50839f..379df243 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -2,7 +2,7 @@ import os import sys import traceback import inspect -from collections import namedtuple, OrderedDict +from collections import namedtuple import torch import tqdm @@ -108,7 +108,7 @@ class DirWithTextualInversionEmbeddings: class EmbeddingDatabase: def __init__(self): self.ids_lookup = {} - self.word_embeddings = OrderedDict() + self.word_embeddings = {} self.skipped_embeddings = {} self.expected_shape = -1 self.embedding_dirs = {} @@ -234,7 +234,10 @@ class EmbeddingDatabase: embdir.update() # re-sort word_embeddings because load_from_dir may not load in alphabetic order. - self.word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + # using a temporary copy so we don't reinitialize self.word_embeddings in case other objects have a reference to it. 
+ sorted_word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + self.word_embeddings.clear() + self.word_embeddings.update(sorted_word_embeddings) displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys())) if self.previously_displayed_embeddings != displayed_embeddings: -- cgit v1.2.1 From 3af152d488db0c521f6058676e1a65c7157ccc14 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 14 Apr 2023 17:17:14 -0400 Subject: Fix image mask composite for weird resolutions --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..f49992d9 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -708,7 +708,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay: image_mask = p.mask_for_overlay.convert('RGB') - image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), p.mask_for_overlay.convert('L')).convert('RGBA') + image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') if opts.save_mask: images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask") -- cgit v1.2.1 From fbab3fc6d122fb4e6648dd82291a70fc348c0b4a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 14 Apr 2023 17:24:55 -0400 Subject: Only handle image mask if any option enabled --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index f49992d9..5c6edc60 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -706,7 +706,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: image.info["parameters"] = text output_images.append(image) - if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay: + if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]): image_mask = p.mask_for_overlay.convert('RGB') image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') -- cgit v1.2.1 From 02e351880796422eac3bbaf7aa86332b588651ce Mon Sep 17 00:00:00 2001 From: tqwuliao Date: Sat, 15 Apr 2023 23:20:08 +0800 Subject: Add new FilenameGenerator [hasprompt..] --- modules/images.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index 2da988ee..cdbb77e1 100644 --- a/modules/images.py +++ b/modules/images.py @@ -349,6 +349,7 @@ class FilenameGenerator: 'prompt_no_styles': lambda self: self.prompt_no_style(), 'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False), 'prompt_words': lambda self: self.prompt_words(), + 'hasprompt': lambda self, *args: self.hasprompt(*args), #accept formats:[hasprompt..] 
} default_time_format = '%Y%m%d%H%M%S' @@ -357,6 +358,22 @@ class FilenameGenerator: self.seed = seed self.prompt = prompt self.image = image + + def hasprompt(self, *args): + lower = self.prompt.lower() + if self.p is None or self.prompt is None: + return None + outres = "" + for arg in args: + if arg != "": + division = arg.split("|") + expected = division[0].lower() + default = division[1] if len(division) > 1 else "" + if lower.find(expected) >= 0: + outres = f'{outres}{expected}' + else: + outres = outres if default == "" else f'{outres}{default}' + return sanitize_filename_part(outres) def prompt_no_style(self): if self.p is None or self.prompt is None: -- cgit v1.2.1 From 596556162efd66cca225dffa3765d77cd51f69fe Mon Sep 17 00:00:00 2001 From: File_xor Date: Sun, 16 Apr 2023 16:49:21 +0900 Subject: Add filename pattern for CLIP_stop_at_last_layers. --- modules/images.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index b3535070..6391c34c 100644 --- a/modules/images.py +++ b/modules/images.py @@ -352,6 +352,7 @@ class FilenameGenerator: 'prompt_no_styles': lambda self: self.prompt_no_style(), 'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False), 'prompt_words': lambda self: self.prompt_words(), + 'clip_skip': lambda: opts.data["CLIP_stop_at_last_layers"], } default_time_format = '%Y%m%d%H%M%S' -- cgit v1.2.1 From acbec225549987297383e3a31290b3f80ed064fd Mon Sep 17 00:00:00 2001 From: File_xor Date: Sun, 16 Apr 2023 17:14:11 +0900 Subject: Add self argument that is mandatory to [clip_skip] filename pattern. --- modules/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index 6391c34c..1a118a69 100644 --- a/modules/images.py +++ b/modules/images.py @@ -352,7 +352,7 @@ class FilenameGenerator: 'prompt_no_styles': lambda self: self.prompt_no_style(), 'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False), 'prompt_words': lambda self: self.prompt_words(), - 'clip_skip': lambda: opts.data["CLIP_stop_at_last_layers"], + 'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"], } default_time_format = '%Y%m%d%H%M%S' -- cgit v1.2.1 From 984970068c2bdc14cff266129ca25a26fbccbf2e Mon Sep 17 00:00:00 2001 From: siutin Date: Mon, 17 Apr 2023 01:06:28 +0800 Subject: multi users support --- modules/call_queue.py | 23 ++++++++++++-------- modules/progress.py | 60 ++++++++++++++++++++++++++++++++++++--------------- modules/ui.py | 4 ++-- 3 files changed, 59 insertions(+), 28 deletions(-) (limited to 'modules') diff --git a/modules/call_queue.py b/modules/call_queue.py index 9888109e..632afcdd 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -4,6 +4,7 @@ import threading import traceback import time +import gradio as gr from modules import shared, progress queue_lock = threading.Lock() @@ -20,41 +21,45 @@ def wrap_queued_call(func): def wrap_gradio_gpu_call(func, extra_outputs=None): - def f(*args, **kwargs): + def f(request: gr.Request, *args, **kwargs): + user = request.username # if the first argument is a string that says "task(...)", it is treated as a job id if len(args) > 0 and type(args[0]) == str and args[0][0:5] == "task(" and args[0][-1] == ")": id_task = args[0] - progress.add_task_to_queue(id_task) + progress.add_task_to_queue(user, id_task) else: id_task = None with queue_lock: shared.state.begin() - progress.start_task(id_task) + 
progress.start_task(user, id_task) try: res = func(*args, **kwargs) finally: - progress.finish_task(id_task) - progress.set_last_task_result(id_task, res) + progress.finish_task(user, id_task) + progress.set_last_task_result(user, id_task, res) shared.state.end() return res - return wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True) + return wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True, add_request=True) -def wrap_gradio_call(func, extra_outputs=None, add_stats=False): - def f(*args, extra_outputs_array=extra_outputs, **kwargs): +def wrap_gradio_call(func, extra_outputs=None, add_stats=False, add_request=False): + def f(request: gr.Request, *args, extra_outputs_array=extra_outputs, **kwargs): run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats if run_memmon: shared.mem_mon.monitor() t = time.perf_counter() try: - res = list(func(*args, **kwargs)) + if add_request: + res = list(func(request, *args, **kwargs)) + else: + res = list(func(*args, **kwargs)) except Exception as e: # When printing out our debug argument list, do not print out more than a MB of text max_debug_str_len = 131072 # (1024*1024)/8 diff --git a/modules/progress.py b/modules/progress.py index e99267f5..13568701 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -4,7 +4,9 @@ import time import gradio as gr from pydantic import BaseModel, Field -from typing import List +from typing import Optional +from fastapi import Depends, Security +from fastapi.security import APIKeyCookie from modules import call_queue from modules.shared import opts @@ -12,57 +14,71 @@ from modules.shared import opts import modules.shared as shared +current_task_user = None current_task = None pending_tasks = {} finished_tasks = [] -def start_task(id_task): +def start_task(user, id_task): global current_task + global current_task_user + current_task_user = user current_task = id_task - pending_tasks.pop(id_task, None) + pending_tasks.pop((user, id_task), None) -def finish_task(id_task): +def finish_task(user, id_task): global current_task + global current_task_user if current_task == id_task: current_task = None - finished_tasks.append(id_task) + if current_task_user == user: + current_task_user = None + + finished_tasks.append((user, id_task)) if len(finished_tasks) > 16: finished_tasks.pop(0) -def add_task_to_queue(id_job): - pending_tasks[id_job] = time.time() +def add_task_to_queue(user, id_job): + pending_tasks[(user, id_job)] = time.time() last_task_id = None last_task_result = None +last_task_user = None + +def set_last_task_result(user, id_job, result): -def set_last_task_result(id_job, result): global last_task_id global last_task_result + global last_task_user last_task_id = id_job last_task_result = result + last_task_user = user -def restore_progress_call(): +def restore_progress_call(request: gr.Request): if current_task is None: # image, generation_info, html_info, html_log return tuple(list([None, None, None, None])) else: + user = request.username - t_task = current_task - with call_queue.queue_lock_condition: - call_queue.queue_lock_condition.wait_for(lambda: t_task == last_task_id) + if current_task_user == user: + t_task = current_task + with call_queue.queue_lock_condition: + call_queue.queue_lock_condition.wait_for(lambda: t_task == last_task_id) - return last_task_result + return last_task_result + return tuple(list([None, None, None, None])) class CurrentTaskResponse(BaseModel): current_task: str = Field(default=None, title="Task ID", 
description="id of the current progress task") @@ -87,6 +103,19 @@ def setup_progress_api(app): return app.add_api_route("/internal/progress", progressapi, methods=["POST"], response_model=ProgressResponse) def setup_current_task_api(app): + + def get_current_user(token: Optional[str] = Security(APIKeyCookie(name="access-token", auto_error=False))): + return None if token is None else app.tokens.get(token) + + def current_task_api(current_user: str = Depends(get_current_user)): + + if app.auth is None or current_task_user == current_user: + current_user_task = current_task + else: + current_user_task = None + + return CurrentTaskResponse(current_task=current_user_task) + return app.add_api_route("/internal/current_task", current_task_api, methods=["GET"], response_model=CurrentTaskResponse) def progressapi(req: ProgressRequest): @@ -127,7 +156,4 @@ def progressapi(req: ProgressRequest): else: live_preview = None - return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo) - -def current_task_api(): - return CurrentTaskResponse(current_task=current_task) \ No newline at end of file + return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo) \ No newline at end of file diff --git a/modules/ui.py b/modules/ui.py index 8fc17ce7..a7b3cccb 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -582,7 +582,7 @@ def create_ui(): res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) restore_progress_button.click( - fn=lambda: restore_progress_call(), + fn=restore_progress_call, _js="() => restoreProgress('txt2img')", inputs=[], outputs=[ @@ -914,7 +914,7 @@ def create_ui(): res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) restore_progress_button.click( - fn=lambda: restore_progress_call(), + fn=restore_progress_call, _js="() => restoreProgress('img2img')", inputs=[], outputs=[ -- cgit v1.2.1 From 56f8a6b081e3dbfc93956e5c44e507e74a2ba040 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 16 Apr 2023 20:34:52 -0400 Subject: Fix sampler schedules with step multiplier --- modules/processing.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8d..f6514637 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -639,8 +639,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: processed = Processed(p, [], p.seed, "") file.write(processed.infotext(p, 0)) - uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc) - c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) + try: + step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 + except: + step_multiplier = 1 + uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc) + c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, 
cached_c) if len(model_hijack.comments) > 0: for comment in model_hijack.comments: -- cgit v1.2.1 From 81b276a1ea943fc177c7ec16b272eec7bccb62f7 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 16 Apr 2023 20:39:18 -0400 Subject: Add second order samplers compat option --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..e1d4088f 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -338,6 +338,7 @@ options_templates.update(options_section(('compatibility', "Compatibility"), { "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), + "fix_second_order_samplers_schedule": OptionInfo(False, "Fix prompt schedule for second order samplers."), })) options_templates.update(options_section(('interrogate', "Interrogate Options"), { -- cgit v1.2.1 From 4d0c8163036b42dd66aad922c22d3b3d19217bd9 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 16 Apr 2023 20:39:45 -0400 Subject: Modify step multiplier flow --- modules/processing.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index f6514637..15ad629f 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -639,10 +639,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: processed = Processed(p, [], p.seed, "") file.write(processed.infotext(p, 0)) - try: - step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 - except: - step_multiplier = 1 + step_multiplier = 1 + if shared.opts.fix_second_order_samplers_schedule: + try: + step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 + except: + pass uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c) -- cgit v1.2.1 From 234fa9a57da00d3f549ea43348ab664665fc275e Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 16 Apr 2023 21:06:22 -0400 Subject: Update shared.py --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index e1d4088f..6f13ec52 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -338,7 +338,7 @@ options_templates.update(options_section(('compatibility', "Compatibility"), { "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."), "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final 
resolution rather than first pass (disables Upscale by, Resize width/height to)."), - "fix_second_order_samplers_schedule": OptionInfo(False, "Fix prompt schedule for second order samplers."), + "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), })) options_templates.update(options_section(('interrogate', "Interrogate Options"), { -- cgit v1.2.1 From 9de7298898951d6038eeadee78f7be3b09d2cfc3 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 16 Apr 2023 21:06:37 -0400 Subject: Update processing.py --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 15ad629f..2f391ee4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -640,7 +640,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: file.write(processed.infotext(p, 0)) step_multiplier = 1 - if shared.opts.fix_second_order_samplers_schedule: + if not shared.opts.dont_fix_second_order_samplers_schedule: try: step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 except: -- cgit v1.2.1 From 3e5b3c79e49ec3a10174c250815dabce6efdddca Mon Sep 17 00:00:00 2001 From: siutin Date: Mon, 17 Apr 2023 11:50:08 +0800 Subject: replace with #wrap_session_call --- modules/call_queue.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'modules') diff --git a/modules/call_queue.py b/modules/call_queue.py index 632afcdd..43f6ebe0 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -10,6 +10,11 @@ from modules import shared, progress queue_lock = threading.Lock() queue_lock_condition = threading.Condition(lock=queue_lock) +def wrap_session_call(func): + def f(request: gr.Request, *args, **kwargs): + return func(request, *args, **kwargs) + return f + def wrap_queued_call(func): def f(*args, **kwargs): with queue_lock: @@ -45,21 +50,18 @@ def wrap_gradio_gpu_call(func, extra_outputs=None): return res - return wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True, add_request=True) + return wrap_session_call(wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True)) -def wrap_gradio_call(func, extra_outputs=None, add_stats=False, add_request=False): - def f(request: gr.Request, *args, extra_outputs_array=extra_outputs, **kwargs): +def wrap_gradio_call(func, extra_outputs=None, add_stats=False): + def f(*args, extra_outputs_array=extra_outputs, **kwargs): run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats if run_memmon: shared.mem_mon.monitor() t = time.perf_counter() try: - if add_request: - res = list(func(request, *args, **kwargs)) - else: - res = list(func(*args, **kwargs)) + res = list(func(*args, **kwargs)) except Exception as e: # When printing out our debug argument list, do not print out more than a MB of text max_debug_str_len = 131072 # (1024*1024)/8 -- cgit v1.2.1 From eddcdb8061012113eb9a97ec6c1af538344970a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C4=9Bj=20Ra=C4=8Dinsk=C3=BD?= Date: Mon, 17 Apr 2023 23:48:28 +0200 Subject: adds label to buttons to make them hide --- modules/ui.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index 627fbe0b..37fcb92c 100644 --- 
a/modules/ui.py +++ b/modules/ui.py @@ -171,8 +171,8 @@ def create_seed_inputs(target_interface): with FormRow(elem_id=target_interface + '_seed_row', variant="compact"): seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed') seed.style(container=False) - random_seed = ToolButton(random_symbol, elem_id=target_interface + '_random_seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_seed') + random_seed = ToolButton(random_symbol, elem_id=target_interface + '_random_seed', label='Random seed') + reuse_seed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_seed', label='Reuse seed') seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False) @@ -468,7 +468,7 @@ def create_ui(): height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn") + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn", label="Switch dims") if opts.dimensions_and_batch_together: with gr.Column(elem_id="txt2img_column_batch"): @@ -1705,7 +1705,7 @@ def create_ui(): if init_field is not None: init_field(saved_value) - if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown] and x.visible: + if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown, ToolButton] and x.visible: apply_field(x, 'visible') if type(x) == gr.Slider: -- cgit v1.2.1 From f4b332f0419e09cec6983edcd07aae2ee0c14c24 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 18 Apr 2023 17:01:46 -0600 Subject: Add "None" option to extra networks dropdowns --- modules/extra_networks_hypernet.py | 2 +- modules/shared.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py index d3a4d7ad..33d100dd 100644 --- a/modules/extra_networks_hypernet.py +++ b/modules/extra_networks_hypernet.py @@ -9,7 +9,7 @@ class ExtraNetworkHypernet(extra_networks.ExtraNetwork): def activate(self, p, params_list): additional = shared.opts.sd_hypernetwork - if additional != "" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0: + if additional != "None" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0: p.all_prompts = [x + f"" for x in p.all_prompts] params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..5a5fbae6 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -361,7 +361,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"), "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"), "extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": [""] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks), + "sd_hypernetwork": OptionInfo("None", "Add 
hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks), })) options_templates.update(options_section(('ui', "User interface"), { -- cgit v1.2.1 From d40e44ade479f7bba30d5317381cbc58c861775b Mon Sep 17 00:00:00 2001 From: Deciare <1689220+deciare@users.noreply.github.com> Date: Tue, 18 Apr 2023 23:18:58 -0400 Subject: Option to use CPU for random number generation. Makes a given manual seed generate the same images across different platforms, independently of the GPU architecture in use. Fixes #9613. --- modules/devices.py | 8 ++++++-- modules/sd_samplers_common.py | 9 +++++++++ modules/sd_samplers_kdiffusion.py | 2 +- modules/shared.py | 1 + 4 files changed, 17 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/devices.py b/modules/devices.py index 52c3e7cd..3bc86a6a 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -92,14 +92,18 @@ def cond_cast_float(input): def randn(seed, shape): + from modules.shared import opts + torch.manual_seed(seed) - if device.type == 'mps': + if opts.use_cpu_randn or device.type == 'mps': return torch.randn(shape, device=cpu).to(device) return torch.randn(shape, device=device) def randn_without_seed(shape): - if device.type == 'mps': + from modules.shared import opts + + if opts.use_cpu_randn or device.type == 'mps': return torch.randn(shape, device=cpu).to(device) return torch.randn(shape, device=device) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index a1aac7cf..e6a372d5 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -60,3 +60,12 @@ def store_latent(decoded): class InterruptedException(BaseException): pass + +if opts.use_cpu_randn: + import torchsde._brownian.brownian_interval + + def torchsde_randn(size, dtype, device, seed): + generator = torch.Generator(devices.cpu).manual_seed(int(seed)) + return torch.randn(size, dtype=dtype, device=devices.cpu, generator=generator).to(device) + + torchsde._brownian.brownian_interval._randn = torchsde_randn diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index e9f08518..13f4567a 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -190,7 +190,7 @@ class TorchHijack: if noise.shape == x.shape: return noise - if x.device.type == 'mps': + if opts.use_cpu_randn or x.device.type == 'mps': return torch.randn_like(x, device=devices.cpu).to(x.device) else: return torch.randn_like(x) diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..59b037d5 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -331,6 +331,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }), "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), + "use_cpu_randn": OptionInfo(False, "Use CPU for random number generation to make manual seeds generate the same image across platforms. 
This may change existing seeds."), })) options_templates.update(options_section(('compatibility', "Compatibility"), { -- cgit v1.2.1 From dec5cdd9b89dd683f04fb904ebd8a56dfce860ae Mon Sep 17 00:00:00 2001 From: AdjointOperator Date: Wed, 19 Apr 2023 15:35:50 +0800 Subject: add tiled inference support for ScuNET --- modules/shared.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..056f9fc6 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -283,6 +283,8 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}), "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), + "SCUNET_tile": OptionInfo(256, "Tile size for SCUNET upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}), + "SCUNET_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SCUNET upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}), })) options_templates.update(options_section(('face-restoration', "Face restoration"), { -- cgit v1.2.1 From 2c24e09dfc431ef7617134a807bf741f0b0c9fa3 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Thu, 20 Apr 2023 14:53:04 +0800 Subject: Fix gallery not being refreshed correctly --- modules/txt2img.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'modules') diff --git a/modules/txt2img.py b/modules/txt2img.py index 16841d0f..6ebca656 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -7,6 +7,7 @@ from modules.shared import opts, cmd_opts import modules.shared as shared import modules.processing as processing from modules.ui import plaintext_to_html +from time import time def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args): @@ -63,6 +64,9 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step if opts.samples_log_stdout: print(generation_info_js) + for img in processed.images: + img.already_saved_as += f'?{int(time())}' + if opts.do_not_show_images: processed.images = [] -- cgit v1.2.1 From 988dd02632bf38e64bc0cf394ece2a5f7a64e15b Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Thu, 20 Apr 2023 15:08:31 +0800 Subject: Add img2img refreshed correctly --- modules/img2img.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'modules') diff --git a/modules/img2img.py b/modules/img2img.py index 953ac5d2..6da974e4 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -15,6 +15,7 @@ import modules.processing as processing from modules.ui import 
plaintext_to_html import modules.images as images import modules.scripts +from time import time def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): @@ -179,6 +180,9 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s if opts.samples_log_stdout: print(generation_info_js) + for img in processed.images: + img.already_saved_as += f'?{int(time())}' + if opts.do_not_show_images: processed.images = [] -- cgit v1.2.1 From eff00413ae76e01b9a24b13dee6a0dda98f643f2 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Thu, 20 Apr 2023 21:23:56 +0800 Subject: Refresh bug fix --- modules/img2img.py | 3 ++- modules/txt2img.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/img2img.py b/modules/img2img.py index 6da974e4..fea51667 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -181,7 +181,8 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s print(generation_info_js) for img in processed.images: - img.already_saved_as += f'?{int(time())}' + if hasattr(img, 'already_saved_as'): + img.already_saved_as += f'?{int(time())}' if opts.do_not_show_images: processed.images = [] diff --git a/modules/txt2img.py b/modules/txt2img.py index 6ebca656..57278bdc 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -65,7 +65,8 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step print(generation_info_js) for img in processed.images: - img.already_saved_as += f'?{int(time())}' + if hasattr(img, 'already_saved_as'): + img.already_saved_as += f'?{int(time())}' if opts.do_not_show_images: processed.images = [] -- cgit v1.2.1 From bbc7a778d8a256e57c310bf27c6104a84beb5c78 Mon Sep 17 00:00:00 2001 From: dennissheng Date: Mon, 24 Apr 2023 17:36:16 +0800 Subject: fix ui img2img scripts --- modules/img2img.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/img2img.py b/modules/img2img.py index 953ac5d2..d22d9a49 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -151,7 +151,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s override_settings=override_settings, ) - p.scripts = modules.scripts.scripts_txt2img + p.scripts = modules.scripts.scripts_img2img p.script_args = args if shared.cmd_opts.enable_console_prompts: -- cgit v1.2.1 From 0e071ae504a8219934b2f35a1d88dcb1f628d7c3 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 21 Apr 2023 20:19:58 -0600 Subject: Custom delimiters --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecb..3cc2f8ba 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -382,6 +382,7 @@ options_templates.update(options_section(('ui', "User interface"), { "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"), "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), + "keyedit_delimiters": OptionInfo(".,\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"), "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", 
ui_components.DropdownMulti, lambda: {"choices": [x for x in tab_names]}), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), -- cgit v1.2.1 From b2f6e0704e178f64881f507f3a48ff36e63e1a62 Mon Sep 17 00:00:00 2001 From: catalpaaa Date: Tue, 25 Apr 2023 07:27:24 -0700 Subject: add subpath support --- modules/cmd_args.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 81c0b82a..bdf106bf 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -101,3 +101,4 @@ parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gra parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) +parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') \ No newline at end of file -- cgit v1.2.1 From bb426de1cd1380844eb048f242bca6aa254edf58 Mon Sep 17 00:00:00 2001 From: darnell8 Date: Tue, 25 Apr 2023 22:53:06 +0800 Subject: Fix CLIP FileExistsError --- modules/interrogate.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/interrogate.py b/modules/interrogate.py index cbb80683..ecdb11bb 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -32,7 +32,8 @@ def download_default_clip_interrogate_categories(content_dir): category_types = ["artists", "flavors", "mediums", "movements"] try: - os.makedirs(tmpdir) + if not os.path.exists(tmpdir): + os.makedirs(tmpdir) for category_type in category_types: torch.hub.download_url_to_file(f"https://raw.githubusercontent.com/pharmapsychotic/clip-interrogator/main/clip_interrogator/data/{category_type}.txt", os.path.join(tmpdir, f"{category_type}.txt")) os.rename(tmpdir, content_dir) @@ -41,7 +42,7 @@ def download_default_clip_interrogate_categories(content_dir): errors.display(e, "downloading default CLIP interrogate categories") finally: if os.path.exists(tmpdir): - os.remove(tmpdir) + os.removedirs(tmpdir) class InterrogateModels: -- cgit v1.2.1 From 43186ad08407c08835f5678d10c7600e23b7fe8f Mon Sep 17 00:00:00 2001 From: Garrett Sutula Date: Thu, 27 Apr 2023 20:29:21 -0400 Subject: Add tls_verify arg for use with self-signed certs --- modules/cmd_args.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 81c0b82a..3aeecdcc 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -95,6 +95,7 @@ parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origin( parser.add_argument("--cors-allow-origins-regex", type=str, help="Allowed CORS origin(s) in the form of a single regular expression", default=None) parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None) parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None) +parser.add_argument("--tls-verify", type=bool, help="Enables the use of self-signed certificates when set to False", default=None) parser.add_argument("--server-name", type=str, help="Sets hostname of server", 
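# [illustrative aside, not part of the patches] the "--tls-verify" flag added
# above uses argparse's type=bool, a well-known footgun: bool() of any
# non-empty string is True, so "--tls-verify False" still parses as True.
# A minimal demonstration with a throwaway parser:
#
#   import argparse
#   p = argparse.ArgumentParser()
#   p.add_argument("--tls-verify", type=bool, default=None)
#   print(p.parse_args(["--tls-verify", "False"]).tls_verify)  # prints True
#
# The "Improve param semantics" commit that follows sidesteps this by
# switching to an action-based flag, which takes no value at all.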
default=None) parser.add_argument("--gradio-queue", action='store_true', help="does not do anything", default=True) parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the defaul in earlier versions") -- cgit v1.2.1 From d1e62b296146bd158630b8444e4720a1ae445151 Mon Sep 17 00:00:00 2001 From: Garrett Sutula Date: Thu, 27 Apr 2023 21:30:19 -0400 Subject: Improve param semantics, --- modules/cmd_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 3aeecdcc..f47c21bb 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -95,7 +95,7 @@ parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origin( parser.add_argument("--cors-allow-origins-regex", type=str, help="Allowed CORS origin(s) in the form of a single regular expression", default=None) parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None) parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None) -parser.add_argument("--tls-verify", type=bool, help="Enables the use of self-signed certificates when set to False", default=None) +parser.add_argument("--disable-tls-verify", action="store_false", help="When passed, enables the use of self-signed certificates.", default=None) parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None) parser.add_argument("--gradio-queue", action='store_true', help="does not do anything", default=True) parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the defaul in earlier versions") -- cgit v1.2.1 From ed46abea356931ab051bee0622d2568deaad5f98 Mon Sep 17 00:00:00 2001 From: Garrett Sutula Date: Thu, 27 Apr 2023 21:30:55 -0400 Subject: fix for method moved to gradio_client --- modules/api/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 518b2a61..8134fe58 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -6,7 +6,7 @@ import uvicorn import gradio as gr from threading import Lock from io import BytesIO -from gradio.processing_utils import decode_base64_to_file +from gradio_client.processing_utils import decode_base64_to_file from fastapi import APIRouter, Depends, FastAPI, Request, Response from fastapi.security import HTTPBasic, HTTPBasicCredentials from fastapi.exceptions import HTTPException -- cgit v1.2.1 From aac478cb9d39354bdb7778a37529d931dc3f87d6 Mon Sep 17 00:00:00 2001 From: Garrett Sutula Date: Thu, 27 Apr 2023 21:40:16 -0400 Subject: Should be "utils" --- modules/api/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 8134fe58..2346efd2 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -6,7 +6,7 @@ import uvicorn import gradio as gr from threading import Lock from io import BytesIO -from gradio_client.processing_utils import decode_base64_to_file +from gradio_client.utils import decode_base64_to_file from fastapi import APIRouter, Depends, FastAPI, Request, Response from fastapi.security import HTTPBasic, HTTPBasicCredentials from fastapi.exceptions import HTTPException -- cgit v1.2.1 From 
101a18fc8466577501b57eac6a4b5d07351c9ec6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 09:17:35 +0300 Subject: bump gradio to 3.27 --- modules/api/api.py | 12 +++--------- modules/postprocessing.py | 9 +++++++-- modules/ui.py | 2 +- modules/ui_common.py | 2 +- 4 files changed, 12 insertions(+), 13 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 518b2a61..5ed670e9 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -6,7 +6,6 @@ import uvicorn import gradio as gr from threading import Lock from io import BytesIO -from gradio.processing_utils import decode_base64_to_file from fastapi import APIRouter, Depends, FastAPI, Request, Response from fastapi.security import HTTPBasic, HTTPBasicCredentials from fastapi.exceptions import HTTPException @@ -395,16 +394,11 @@ class Api: def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): reqDict = setUpscalers(req) - def prepareFiles(file): - file = decode_base64_to_file(file.data, file_path=file.name) - file.orig_name = file.name - return file - - reqDict['image_folder'] = list(map(prepareFiles, reqDict['imageList'])) - reqDict.pop('imageList') + image_list = reqDict.pop('imageList', []) + image_folder = [decode_base64_to_image(x.data) for x in image_list] with self.queue_lock: - result = postprocessing.run_extras(extras_mode=1, image="", input_dir="", output_dir="", save_output=False, **reqDict) + result = postprocessing.run_extras(extras_mode=1, image_folder=image_folder, image="", input_dir="", output_dir="", save_output=False, **reqDict) return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 09d8e605..ba5745b9 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -18,9 +18,14 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if extras_mode == 1: for img in image_folder: - image = Image.open(img) + if isinstance(img, Image.Image): + fn = '' + else: + image = Image.open(img) + fn = os.path.splitext(img.orig_name)[0] + image_data.append(image) - image_names.append(os.path.splitext(img.orig_name)[0]) + image_names.append(fn) elif extras_mode == 2: assert not shared.cmd_opts.hide_ui_dir_config, '--hide-ui-dir-config option must be disabled' assert input_dir, 'input directory not selected' diff --git a/modules/ui.py b/modules/ui.py index 627fbe0b..dd28bdbb 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1204,7 +1204,7 @@ def create_ui(): with gr.Column(elem_id='ti_gallery_container'): ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4) + ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) ti_progress = gr.HTML(elem_id="ti_progress", value="") ti_outcome = gr.HTML(elem_id="ti_error", value="") diff --git a/modules/ui_common.py b/modules/ui_common.py index 3b11dcc8..27ab3ebb 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -125,7 +125,7 @@ Requested path was: {f} with gr.Column(variant='panel', elem_id=f"{tabname}_results"): with gr.Group(elem_id=f"{tabname}_gallery_container"): - result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery").style(grid=4) + result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery").style(columns=4) 
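# [sketch, not part of the hunk above] the api.py hunk earlier in this commit
# replaces the tempfile-based batch upload with direct base64 decoding; the
# decode_base64_to_image helper it relies on (defined elsewhere in
# modules/api) amounts to roughly the following, where the data-URL handling
# is an assumption based on typical payloads:
import base64
import io

from PIL import Image

def decode_base64_to_image(encoding: str) -> Image.Image:
    if encoding.startswith("data:image/"):
        # strip a "data:image/png;base64," style prefix if present
        encoding = encoding.split(";")[1].split(",")[1]
    return Image.open(io.BytesIO(base64.b64decode(encoding)))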
generation_info = None with gr.Column(): -- cgit v1.2.1 From 5a666f3904c39bb3d7d86818b2b6579325a8fa8a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 09:17:35 +0300 Subject: bump gradio to 3.27 --- modules/postprocessing.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/postprocessing.py b/modules/postprocessing.py index ba5745b9..ff055aae 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -19,6 +19,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if extras_mode == 1: for img in image_folder: if isinstance(img, Image.Image): + image = img fn = '' else: image = Image.open(img) -- cgit v1.2.1 From 1514add5597d3dde0360110d158da7772b054cad Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 09:42:42 +0300 Subject: remove unneded imports and type signature --- modules/postprocessing.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 9cb80957..4dc1a2ab 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -1,6 +1,4 @@ import os -import tempfile -from typing import List from PIL import Image @@ -8,7 +6,7 @@ from modules import shared, images, devices, scripts, scripts_postprocessing, ui from modules.shared import opts -def run_postprocessing(extras_mode, image, image_folder: List[tempfile.NamedTemporaryFile], input_dir, output_dir, show_extras_results, *args, save_output: bool = True): +def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True): devices.torch_gc() shared.state.begin() -- cgit v1.2.1 From 642d96dcc83a66547899896c410bc27a34924c3f Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 10:04:01 +0300 Subject: use exist_ok=True instead of checking if directory exists --- modules/interrogate.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/interrogate.py b/modules/interrogate.py index ecdb11bb..e1665708 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -32,8 +32,7 @@ def download_default_clip_interrogate_categories(content_dir): category_types = ["artists", "flavors", "mediums", "movements"] try: - if not os.path.exists(tmpdir): - os.makedirs(tmpdir) + os.makedirs(tmpdir, exist_ok=True) for category_type in category_types: torch.hub.download_url_to_file(f"https://raw.githubusercontent.com/pharmapsychotic/clip-interrogator/main/clip_interrogator/data/{category_type}.txt", os.path.join(tmpdir, f"{category_type}.txt")) os.rename(tmpdir, content_dir) -- cgit v1.2.1 From 5fe0dd79beaa5ef737ff85254ee9870f60ae9464 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 11:29:37 +0300 Subject: rename CPU RNG to RNG source in settings, add infotext and parameters copypaste support to RNG source --- modules/devices.py | 4 ++-- modules/generation_parameters_copypaste.py | 5 +++++ modules/processing.py | 3 ++- modules/sd_samplers_common.py | 3 ++- modules/sd_samplers_kdiffusion.py | 2 +- modules/shared.py | 2 +- 6 files changed, 13 insertions(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/devices.py b/modules/devices.py index 3bc86a6a..c705a3cb 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -95,7 +95,7 @@ def randn(seed, shape): from modules.shared import opts torch.manual_seed(seed) 
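# [sketch, not part of the hunk] what the CPU noise source handled in this
# commit boils down to: draw the noise with the CPU generator, then move it
# to the active device, so a given seed yields the same latents on any GPU
# vendor (the "cuda" default is an assumption for illustration):
import torch

def randn_cpu(seed: int, shape, device: str = "cuda"):
    torch.manual_seed(seed)  # seeds the CPU generator
    return torch.randn(shape, device="cpu").to(device)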
- if opts.use_cpu_randn or device.type == 'mps': + if opts.randn_source == "CPU" or device.type == 'mps': return torch.randn(shape, device=cpu).to(device) return torch.randn(shape, device=device) @@ -103,7 +103,7 @@ def randn(seed, shape): def randn_without_seed(shape): from modules.shared import opts - if opts.use_cpu_randn or device.type == 'mps': + if opts.randn_source == "CPU" or device.type == 'mps': return torch.randn(shape, device=cpu).to(device) return torch.randn(shape, device=device) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 6df76858..e7269363 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -284,6 +284,10 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model restore_old_hires_fix_params(res) + # Missing RNG means the default was set, which is GPU RNG + if "RNG" not in res: + res["RNG"] = "GPU" + return res @@ -304,6 +308,7 @@ infotext_to_setting_name_mapping = [ ('UniPC skip type', 'uni_pc_skip_type'), ('UniPC order', 'uni_pc_order'), ('UniPC lower order final', 'uni_pc_lower_order_final'), + ('RNG', 'randn_source'), ] diff --git a/modules/processing.py b/modules/processing.py index 5556afc5..7bac154d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -477,7 +477,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, - "Init image hash": getattr(p, 'init_img_hash', None) + "Init image hash": getattr(p, 'init_img_hash', None), + "RNG": (opts.randn_source if opts.randn_source != "GPU" else None) } generation_params.update(p.extra_generation_params) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index e6a372d5..bc074238 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -61,7 +61,8 @@ def store_latent(decoded): class InterruptedException(BaseException): pass -if opts.use_cpu_randn: + +if opts.randn_source == "CPU": import torchsde._brownian.brownian_interval def torchsde_randn(size, dtype, device, seed): diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 13f4567a..a547d1b5 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -190,7 +190,7 @@ class TorchHijack: if noise.shape == x.shape: return noise - if opts.use_cpu_randn or x.device.type == 'mps': + if opts.randn_source == "CPU" or x.device.type == 'mps': return torch.randn_like(x, device=devices.cpu).to(x.device) else: return torch.randn_like(x) diff --git a/modules/shared.py b/modules/shared.py index b5b401fe..73704889 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -334,7 +334,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }), "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "use_cpu_randn": OptionInfo(False, "Use CPU for random number generation to make manual 
seeds generate the same image across platforms. This may change existing seeds."), + "randn_source": OptionInfo("GPU", "Random number generator source. Changes seeds drastically. Use CPU to produce the same picture across different vidocard vendors.", gr.Radio, {"choices": ["GPU", "CPU"]}), })) options_templates.update(options_section(('compatibility', "Compatibility"), { -- cgit v1.2.1 From ee71eee1818f6f6eba9895c93ba25e0cad27e069 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 12:36:50 +0300 Subject: stuff related to torch version change --- modules/safe.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/safe.py b/modules/safe.py index 82d44be3..dadf319c 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -1,6 +1,5 @@ # this code is adapted from the script contributed by anon from /h/ -import io import pickle import collections import sys @@ -12,11 +11,9 @@ import _codecs import zipfile import re - # PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage - def encode(*args): out = _codecs.encode(*args) return out @@ -27,7 +24,7 @@ class RestrictedUnpickler(pickle.Unpickler): def persistent_load(self, saved_id): assert saved_id[0] == 'storage' - return TypedStorage() + return TypedStorage(_internal=True) def find_class(self, module, name): if self.extra_handler is not None: -- cgit v1.2.1 From b06205eaf6fc8cb98c317ca6f82bb048a5034f72 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 12:52:09 +0300 Subject: Allow user input for gradio theme selection --- modules/shared.py | 2 +- modules/ui_components.py | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 8b5f752e..4313e4af 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -410,7 +410,7 @@ options_templates.update(options_section(('ui', "User interface"), { "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), - "gradio_theme": OptionInfo("Default", "Gradio theme (requires restart)", gr.Dropdown, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}) + "gradio_theme": OptionInfo("Default", "Gradio theme (requires restart)", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}) })) options_templates.update(options_section(('ui', "Live previews"), { diff --git a/modules/ui_components.py b/modules/ui_components.py index 2b1da2cb..64451df7 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -62,3 +62,13 @@ class DropdownMulti(FormComponent, gr.Dropdown): def get_block_name(self): return "dropdown" + + +class DropdownEditable(FormComponent, gr.Dropdown): + """Same as gr.Dropdown but allows editing value""" + def __init__(self, **kwargs): + super().__init__(allow_custom_value=True, **kwargs) + + def get_block_name(self): + return "dropdown" + -- cgit v1.2.1 From cb940a583d909f5f144ffc6e4ad83a319620df69 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 13:45:05 
+0300 Subject: fix extension installation broken by #9518 --- modules/ui_extensions.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index b402bc8b..e90bedc8 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -129,7 +129,7 @@ def normalize_git_url(url): return url -def install_extension_from_url(dirname, branch_name, url): +def install_extension_from_url(dirname, url, branch_name=None): check_access() assert url, 'No URL specified' @@ -150,7 +150,7 @@ def install_extension_from_url(dirname, branch_name, url): try: shutil.rmtree(tmpdir, True) - if branch_name == '': + if not branch_name: # if no branch is specified, use the default branch with git.Repo.clone_from(url, tmpdir) as repo: repo.remote().fetch() @@ -390,7 +390,7 @@ def create_ui(): install_button.click( fn=modules.ui.wrap_gradio_call(install_extension_from_url, extra_outputs=[gr.update()]), - inputs=[install_dirname, install_branch, install_url], + inputs=[install_dirname, install_url, install_branch], outputs=[extensions_table, install_result], ) -- cgit v1.2.1 From 1d11e896984c883f6a0debb3abaef945595cbc70 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 15:57:09 +0300 Subject: rework Negative Guidance minimum sigma to work with AND, add infotext and copypaste parameters support --- modules/generation_parameters_copypaste.py | 1 + modules/processing.py | 3 ++- modules/sd_samplers_kdiffusion.py | 43 +++++++++++++++++------------- 3 files changed, 28 insertions(+), 19 deletions(-) (limited to 'modules') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index e7269363..99f1a0d3 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -309,6 +309,7 @@ infotext_to_setting_name_mapping = [ ('UniPC order', 'uni_pc_order'), ('UniPC lower order final', 'uni_pc_lower_order_final'), ('RNG', 'randn_source'), + ('NGMS', 's_min_uncond'), ] diff --git a/modules/processing.py b/modules/processing.py index 04a06290..c50784f4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -480,7 +480,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, "Init image hash": getattr(p, 'init_img_hash', None), - "RNG": (opts.randn_source if opts.randn_source != "GPU" else None) + "RNG": opts.randn_source if opts.randn_source != "GPU" else None, + "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, } generation_params.update(p.extra_generation_params) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index d42d5fcf..f8aaac59 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -115,20 +115,21 @@ class CFGDenoiser(torch.nn.Module): sigma_in = denoiser_params.sigma tensor = denoiser_params.text_cond uncond = denoiser_params.text_uncond + skip_uncond = False - if self.step % 2 and s_min_uncond > 0 and not is_edit_model: - # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - sigma_threshold = s_min_uncond - if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_threshold*sigma_threshold) ): - uncond = torch.zeros([0,0,uncond.shape[2]]) - x_in=x_in[:x_in.shape[0]//2] - sigma_in=sigma_in[:sigma_in.shape[0]//2] + 
# alternating uncond allows for higher thresholds without the quality loss normally expected from raising it + if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: + skip_uncond = True + x_in = x_in[:-batch_size] + sigma_in = sigma_in[:-batch_size] - if tensor.shape[1] == uncond.shape[1]: - if not is_edit_model: - cond_in = torch.cat([tensor, uncond]) - else: + if tensor.shape[1] == uncond.shape[1] or skip_uncond: + if is_edit_model: cond_in = torch.cat([tensor, uncond, uncond]) + elif skip_uncond: + cond_in = tensor + else: + cond_in = torch.cat([tensor, uncond]) if shared.batch_cond_uncond: x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict([cond_in], image_cond_in)) @@ -152,9 +153,15 @@ class CFGDenoiser(torch.nn.Module): x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - if uncond.shape[0]: + if not skip_uncond: x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) + if skip_uncond: + #x_out = torch.cat([x_out, x_out[0:batch_size]]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be + denoised_image_indexes = [x[0][0] for x in conds_list] + fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) + x_out = torch.cat([x_out, fake_uncond]) + denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) cfg_denoised_callback(denoised_params) @@ -165,13 +172,12 @@ class CFGDenoiser(torch.nn.Module): elif opts.live_preview_content == "Negative prompt": sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) - if not is_edit_model: - if uncond.shape[0]: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) - else: - denoised = x_out - else: + if is_edit_model: denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) + elif skip_uncond: + denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0) + else: + denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) if self.mask is not None: denoised = self.init_latent * self.mask + self.nmask * denoised @@ -221,6 +227,7 @@ class KDiffusionSampler: self.eta = None self.config = None self.last_latent = None + self.s_min_uncond = None self.conditioning_key = sd_model.model.conditioning_key -- cgit v1.2.1 From 737b73a820584b8035fcc37fe35993bec867f326 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 16:05:20 +0300 Subject: some extra lines I forgot to add for previous commit --- modules/sd_samplers_kdiffusion.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index f8aaac59..136aa8e5 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -156,11 +156,10 @@ class CFGDenoiser(torch.nn.Module): if not skip_uncond: x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) + denoised_image_indexes = [x[0][0] for x in conds_list] if skip_uncond: - #x_out = torch.cat([x_out, x_out[0:batch_size]]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be - denoised_image_indexes = [x[0][0] for x in conds_list] fake_uncond = torch.cat([x_out[i:i+1] 
for i in denoised_image_indexes]) - x_out = torch.cat([x_out, fake_uncond]) + x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) cfg_denoised_callback(denoised_params) -- cgit v1.2.1 From 8863b31d83b527d041ca45e23b9af99e2346081a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 16:06:20 +0300 Subject: use correct images for previews when using AND (see #9491) --- modules/sd_samplers_kdiffusion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 136aa8e5..eb98e599 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -167,7 +167,7 @@ class CFGDenoiser(torch.nn.Module): devices.test_for_nans(x_out, "unet") if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(x_out[0:x_out.shape[0]-uncond.shape[0]]) + sd_samplers_common.store_latent(torch.cat([x_out[i:i+1] for i in denoised_image_indexes])) elif opts.live_preview_content == "Negative prompt": sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) -- cgit v1.2.1 From 7428fb5176ccfd203bddcfa30d75c8df5a772cb4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 16:28:51 +0300 Subject: add is_hr_pass field for processing --- modules/processing.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index c50784f4..8d7b2462 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -164,6 +164,7 @@ class StableDiffusionProcessing: self.all_seeds = None self.all_subseeds = None self.iteration = 0 + self.is_hr_pass = False @property @@ -883,6 +884,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if not self.enable_hr: return samples + self.is_hr_pass = True + target_width = self.hr_upscale_to_x target_height = self.hr_upscale_to_y @@ -952,6 +955,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) + self.is_hr_pass = False + return samples -- cgit v1.2.1 From faff08f396f159a5ddd6328a6d2699b7e7d18ef9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 16:48:43 +0300 Subject: rework [batch_number]/[generation_number] filename patterns --- modules/images.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index 9cd17ddd..fd173829 100644 --- a/modules/images.py +++ b/modules/images.py @@ -318,6 +318,7 @@ re_nonletters = re.compile(r'[\s' + string.punctuation + ']+') re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)") re_pattern_arg = re.compile(r"(.*)<([^>]*)>$") max_filename_part_length = 128 +NOTHING_AND_SKIP_PREVIOUS_TEXT = object() def sanitize_filename_part(text, replace_spaces=True): @@ -352,9 +353,9 @@ class FilenameGenerator: 'prompt_no_styles': lambda self: self.prompt_no_style(), 'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False), 'prompt_words': lambda self: self.prompt_words(), - 'batch_number': lambda self: self.p.batch_index + 1, - 
'generation_number': lambda self: self.p.iteration * self.p.batch_size + self.p.batch_index + 1, - 'hasprompt': lambda self, *args: self.hasprompt(*args), #accept formats:[hasprompt..] + 'batch_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.batch_index + 1, + 'generation_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.n_iter == 1 and self.p.batch_size == 1 else self.p.iteration * self.p.batch_size + self.p.batch_index + 1, + 'hasprompt': lambda self, *args: self.hasprompt(*args), # accepts formats:[hasprompt..] 'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"], } default_time_format = '%Y%m%d%H%M%S' @@ -424,12 +425,8 @@ class FilenameGenerator: for m in re_pattern.finditer(x): text, pattern = m.groups() - if pattern is not None and (pattern.lower() == 'batch_number' and self.p.batch_size == 1 or pattern.lower() == 'generation_number' and self.p.n_iter == 1 and self.p.batch_size == 1): - continue - - res += text - if pattern is None: + res += text continue pattern_args = [] @@ -450,11 +447,13 @@ class FilenameGenerator: print(f"Error adding [{pattern}] to filename", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) - if replacement is not None: - res += str(replacement) + if replacement == NOTHING_AND_SKIP_PREVIOUS_TEXT: + continue + elif replacement is not None: + res += text + str(replacement) continue - res += f'[{pattern}]' + res += f'{text}[{pattern}]' return res -- cgit v1.2.1 From 1a50272e7cb470fc78d124c5265b136f2e6f7882 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 17:45:22 +0300 Subject: revert some questionable changes from #9159 --- modules/ui_extra_networks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index ad98f083..aa2f5d1b 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -243,7 +243,7 @@ def create_ui(container, button, tabname): for page in ui.stored_extra_pages: with gr.Tab(page.title, id=page.title.lower().replace(" ", "_")): - page_elem = gr.HTML("") + page_elem = gr.HTML(page.create_html(ui.tabname)) ui.pages.append(page_elem) filter = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False) @@ -284,7 +284,7 @@ def setup_ui(ui, gallery): def save_preview(index, images, filename): if len(images) == 0: print("There is no image in gallery to save as a preview.") - return ["" for page in ui.stored_extra_pages] + return [page.create_html(ui.tabname) for page in ui.stored_extra_pages] index = int(index) index = 0 if index < 0 else index @@ -309,7 +309,7 @@ def setup_ui(ui, gallery): else: image.save(filename) - return ["" for page in ui.stored_extra_pages] + return [page.create_html(ui.tabname) for page in ui.stored_extra_pages] ui.button_save_preview.click( fn=save_preview, -- cgit v1.2.1 From eabecc21ecd240b63cd4b3996286b74e794ddcea Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 29 Apr 2023 18:20:11 +0300 Subject: Update modules/ui.py Co-authored-by: missionfloyd --- modules/ui.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index 653eb665..43e36a0a 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -766,8 +766,12 @@ def create_ui(): with gr.Tabs(): with gr.Tab(label="Resize to") as tab_scale_to: - width = gr.Slider(minimum=64, maximum=2048, 
step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): + res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") with gr.Tab(label="Resize by") as tab_scale_by: scale_by = gr.Slider(minimum=0.05, maximum=4.0, step=0.05, label="Scale", value=1.0, elem_id="img2img_scale") @@ -787,9 +791,6 @@ def create_ui(): tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) - with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"): - res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn") - if opts.dimensions_and_batch_together: with gr.Column(elem_id="img2img_column_batch"): batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") -- cgit v1.2.1 From a95dc02535ce647efbb3bc66964ab61b6f1446f4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 19:05:43 +0300 Subject: remove unwanted changes from #8789 --- modules/script_callbacks.py | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) (limited to 'modules') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index cede1cb0..17109732 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -94,7 +94,6 @@ callback_map = dict( callbacks_script_unloaded=[], callbacks_before_ui=[], callbacks_on_reload=[], - callbacks_on_polling=[], ) @@ -102,6 +101,7 @@ def clear_callbacks(): for callback_list in callback_map.values(): callback_list.clear() + def app_started_callback(demo: Optional[Blocks], app: FastAPI): for c in callback_map['callbacks_app_started']: try: @@ -109,14 +109,8 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI): except Exception: report_exception(c, 'app_started_callback') -def app_polling_callback(demo: Optional[Blocks], app: FastAPI): - for c in callback_map['callbacks_on_polling']: - try: - c.callback() - except Exception: - report_exception(c, 'callbacks_on_polling') -def app_reload_callback(demo: Optional[Blocks], app: FastAPI): +def app_reload_callback(): for c in callback_map['callbacks_on_reload']: try: c.callback() @@ -269,14 +263,11 @@ def on_app_started(callback): add_callback(callback_map['callbacks_app_started'], callback) -def on_polling(callback): - """register a function to be called on each polling of the server.""" - add_callback(callback_map['callbacks_on_polling'], callback) - def on_before_reload(callback): """register a function to be called just before the server reloads.""" add_callback(callback_map['callbacks_on_reload'], callback) + def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the model is passed as an argument; this function is also called when the script is reloaded. 
""" -- cgit v1.2.1 From e40b2d947d874ed5504701bfd8e32243e0c456eb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 19:39:22 +0300 Subject: change gradio callback from change to release in a bunch of places now that it's fixed in gradio --- modules/ui.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index 1130345c..a32500d1 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -782,8 +782,9 @@ def create_ui(): with FormRow(): scale_by_html = FormHTML(resize_from_to_html(0, 0, 0.0), elem_id="img2img_scale_resolution_preview") gr.Slider(label="Unused", elem_id="img2img_unused_scale_by_slider") + button_update_resize_to = gr.Button(visible=False, elem_id="img2img_update_resize_to") - scale_by.change( + on_change_args = dict( fn=resize_from_to_html, _js="currentImg2imgSourceResolution", inputs=[dummy_component, dummy_component, scale_by], @@ -791,6 +792,12 @@ def create_ui(): show_progress=False, ) + scale_by.release(**on_change_args) + button_update_resize_to.click(**on_change_args) + + for component in img2img_image_inputs: + component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) + tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab]) @@ -1647,7 +1654,8 @@ def create_ui(): component = component_dict[k] info = opts.data_labels[k] - component.change( + change_handler = component.release if hasattr(component, 'release') else component.change + change_handler( fn=lambda value, k=k: run_settings_single(value, key=k), inputs=[component], outputs=[component, text_settings], -- cgit v1.2.1 From 90e465982270b85812c94961696b7ba4d2c4d38c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 20:28:18 +0300 Subject: bump gradio to 3.28.1 --- modules/api/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 9ffcbd5f..cdbdce32 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -130,8 +130,8 @@ def api_middleware(app: FastAPI): "body": vars(e).get('body', ''), "errors": str(e), } - print(f"API error: {request.method}: {request.url} {err}") if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions + print(f"API error: {request.method}: {request.url} {err}") if rich_available: console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200])) else: -- cgit v1.2.1 From bd9700405a0686769b58437fd87d9106d3cd1346 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 22:15:20 +0300 Subject: Revert "Merge pull request #7595 from siutin/feature/restore-progress" This reverts commit 80987c36f9bfa33092ac7c75624b25d839cb2a06, reversing changes made to 2e78e65a22bfa6b116ae18d12fdcb85ec8acd727. 
--- modules/call_queue.py | 18 ++++-------- modules/progress.py | 76 ++++++--------------------------------------------- modules/ui.py | 35 +++--------------------- 3 files changed, 17 insertions(+), 112 deletions(-) (limited to 'modules') diff --git a/modules/call_queue.py b/modules/call_queue.py index 43f6ebe0..92097c15 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -4,16 +4,10 @@ import threading import traceback import time -import gradio as gr from modules import shared, progress queue_lock = threading.Lock() -queue_lock_condition = threading.Condition(lock=queue_lock) -def wrap_session_call(func): - def f(request: gr.Request, *args, **kwargs): - return func(request, *args, **kwargs) - return f def wrap_queued_call(func): def f(*args, **kwargs): @@ -26,31 +20,29 @@ def wrap_queued_call(func): def wrap_gradio_gpu_call(func, extra_outputs=None): - def f(request: gr.Request, *args, **kwargs): - user = request.username + def f(*args, **kwargs): # if the first argument is a string that says "task(...)", it is treated as a job id if len(args) > 0 and type(args[0]) == str and args[0][0:5] == "task(" and args[0][-1] == ")": id_task = args[0] - progress.add_task_to_queue(user, id_task) + progress.add_task_to_queue(id_task) else: id_task = None with queue_lock: shared.state.begin() - progress.start_task(user, id_task) + progress.start_task(id_task) try: res = func(*args, **kwargs) finally: - progress.finish_task(user, id_task) - progress.set_last_task_result(user, id_task, res) + progress.finish_task(id_task) shared.state.end() return res - return wrap_session_call(wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True)) + return wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True) def wrap_gradio_call(func, extra_outputs=None, add_stats=False): diff --git a/modules/progress.py b/modules/progress.py index 13568701..c69ecf3d 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -4,84 +4,38 @@ import time import gradio as gr from pydantic import BaseModel, Field -from typing import Optional -from fastapi import Depends, Security -from fastapi.security import APIKeyCookie -from modules import call_queue from modules.shared import opts import modules.shared as shared -current_task_user = None current_task = None pending_tasks = {} finished_tasks = [] -def start_task(user, id_task): +def start_task(id_task): global current_task - global current_task_user - current_task_user = user current_task = id_task - pending_tasks.pop((user, id_task), None) + pending_tasks.pop(id_task, None) -def finish_task(user, id_task): +def finish_task(id_task): global current_task - global current_task_user if current_task == id_task: current_task = None - if current_task_user == user: - current_task_user = None - - finished_tasks.append((user, id_task)) + finished_tasks.append(id_task) if len(finished_tasks) > 16: finished_tasks.pop(0) -def add_task_to_queue(user, id_job): - pending_tasks[(user, id_job)] = time.time() - -last_task_id = None -last_task_result = None -last_task_user = None - -def set_last_task_result(user, id_job, result): - - global last_task_id - global last_task_result - global last_task_user - - last_task_id = id_job - last_task_result = result - last_task_user = user - - -def restore_progress_call(request: gr.Request): - if current_task is None: - - # image, generation_info, html_info, html_log - return tuple(list([None, None, None, None])) - - else: - user = request.username +def add_task_to_queue(id_job): + pending_tasks[id_job] = time.time() - if 
current_task_user == user: - t_task = current_task - with call_queue.queue_lock_condition: - call_queue.queue_lock_condition.wait_for(lambda: t_task == last_task_id) - - return last_task_result - - return tuple(list([None, None, None, None])) - -class CurrentTaskResponse(BaseModel): - current_task: str = Field(default=None, title="Task ID", description="id of the current progress task") class ProgressRequest(BaseModel): id_task: str = Field(default=None, title="Task ID", description="id of the task to get progress for") @@ -102,21 +56,6 @@ class ProgressResponse(BaseModel): def setup_progress_api(app): return app.add_api_route("/internal/progress", progressapi, methods=["POST"], response_model=ProgressResponse) -def setup_current_task_api(app): - - def get_current_user(token: Optional[str] = Security(APIKeyCookie(name="access-token", auto_error=False))): - return None if token is None else app.tokens.get(token) - - def current_task_api(current_user: str = Depends(get_current_user)): - - if app.auth is None or current_task_user == current_user: - current_user_task = current_task - else: - current_user_task = None - - return CurrentTaskResponse(current_task=current_user_task) - - return app.add_api_route("/internal/current_task", current_task_api, methods=["GET"], response_model=CurrentTaskResponse) def progressapi(req: ProgressRequest): active = req.id_task == current_task @@ -156,4 +95,5 @@ def progressapi(req: ProgressRequest): else: live_preview = None - return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo) \ No newline at end of file + return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo) + diff --git a/modules/ui.py b/modules/ui.py index cc3c8d35..a32500d1 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -41,7 +41,6 @@ from modules.textual_inversion import textual_inversion import modules.hypernetworks.ui from modules.generation_parameters_copypaste import image_from_url_text import modules.extras -from modules.progress import restore_progress_call warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning) @@ -82,7 +81,6 @@ apply_style_symbol = '\U0001f4cb' # 📋 clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ extra_networks_symbol = '\U0001F3B4' # 🎴 switch_values_symbol = '\U000021C5' # ⇅ -restore_progress_symbol = '\U0001F300' # 🌀 def plaintext_to_html(text): @@ -327,7 +325,6 @@ def create_toprow(is_img2img): extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") - restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress") token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") @@ -345,7 +342,7 @@ def create_toprow(is_img2img): prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in 
shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button + return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button def setup_progressbar(*args, **kwargs): @@ -462,7 +459,7 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) @@ -594,18 +591,6 @@ def create_ui(): res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) - restore_progress_button.click( - fn=restore_progress_call, - _js="() => restoreProgress('txt2img')", - inputs=[], - outputs=[ - txt2img_gallery, - generation_info, - html_info, - html_log, - ] - ) - txt_prompt_img.change( fn=modules.images.image_data, inputs=[ @@ -674,7 +659,7 @@ def create_ui(): modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) @@ -966,18 +951,6 @@ def create_ui(): submit.click(**img2img_args) res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) - restore_progress_button.click( - fn=restore_progress_call, - _js="() => restoreProgress('img2img')", - inputs=[], - outputs=[ - img2img_gallery, - generation_info, - html_info, - html_log, - ] - ) - img2img_interrogate.click( fn=lambda *args: process_interrogate(interrogate, *args), **interrogate_args, @@ -1574,7 +1547,7 @@ def create_ui(): gr.HTML(shared.html("licenses.html"), elem_id="licenses") gr.Button(value="Show all pages", elem_id="settings_show_all_pages") - + def unload_sd_weights(): 
modules.sd_models.unload_model_weights() -- cgit v1.2.1 From c48ab36cb9e0120c6f1779bee9e875bee8f903f5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Apr 2023 22:16:54 +0300 Subject: alternate restore progress button implementation --- modules/call_queue.py | 1 + modules/progress.py | 18 ++++++++++++++++++ modules/ui.py | 36 ++++++++++++++++++++++++++++++++---- 3 files changed, 51 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/call_queue.py b/modules/call_queue.py index 92097c15..1829f3a6 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -35,6 +35,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None): try: res = func(*args, **kwargs) + progress.record_results(id_task, res) finally: progress.finish_task(id_task) diff --git a/modules/progress.py b/modules/progress.py index c69ecf3d..5655346b 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -13,6 +13,8 @@ import modules.shared as shared current_task = None pending_tasks = {} finished_tasks = [] +recorded_results = [] +recorded_results_limit = 2 def start_task(id_task): @@ -33,6 +35,12 @@ def finish_task(id_task): finished_tasks.pop(0) +def record_results(id_task, res): + recorded_results.append((id_task, res)) + if len(recorded_results) > recorded_results_limit: + recorded_results.pop(0) + + def add_task_to_queue(id_job): pending_tasks[id_job] = time.time() @@ -97,3 +105,13 @@ def progressapi(req: ProgressRequest): return ProgressResponse(active=active, queued=queued, completed=completed, progress=progress, eta=eta, live_preview=live_preview, id_live_preview=id_live_preview, textinfo=shared.state.textinfo) + +def restore_progress(id_task): + while id_task == current_task or id_task in pending_tasks: + time.sleep(0.1) + + res = next(iter([x[1] for x in recorded_results if id_task == x[0]]), None) + if res is not None: + return res + + return gr.update(), gr.update(), gr.update(), f"Couldn't restore progress for {id_task}: results either have been discarded or never were obtained" diff --git a/modules/ui.py b/modules/ui.py index a32500d1..9ff4bcd9 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -19,7 +19,7 @@ import numpy as np from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path @@ -81,6 +81,7 @@ apply_style_symbol = '\U0001f4cb' # 📋 clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️ extra_networks_symbol = '\U0001F3B4' # 🎴 switch_values_symbol = '\U000021C5' # ⇅ +restore_progress_symbol = '\U0001F300' # 🌀 def plaintext_to_html(text): @@ -325,6 +326,7 @@ def create_toprow(is_img2img): extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks") prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") + restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{id_part}_restore_progress", visible=False) 
token_counter = gr.HTML(value="0/75", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"]) token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") @@ -342,7 +344,7 @@ def create_toprow(is_img2img): prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True) create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles") - return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button + return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button def setup_progressbar(*args, **kwargs): @@ -459,7 +461,7 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=False) + txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False) @@ -591,6 +593,19 @@ def create_ui(): res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) + restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressTxt2img", + inputs=[dummy_component], + outputs=[ + txt2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + txt_prompt_img.change( fn=modules.images.image_data, inputs=[ @@ -659,7 +674,7 @@ def create_ui(): modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=True) + img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button, restore_progress_button = create_toprow(is_img2img=True) img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False) @@ -951,6 +966,19 @@ def create_ui(): submit.click(**img2img_args) res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False) + 
restore_progress_button.click( + fn=progress.restore_progress, + _js="restoreProgressImg2img", + inputs=[dummy_component], + outputs=[ + img2img_gallery, + generation_info, + html_info, + html_log, + ], + show_progress=False, + ) + img2img_interrogate.click( fn=lambda *args: process_interrogate(interrogate, *args), **interrogate_args, -- cgit v1.2.1 From 0d1ef296b9f7c4e78060ce167bb784246fa09de1 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 1 May 2023 05:22:53 +0900 Subject: checkpoint override enhancement --- modules/processing.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index a48fff99..e8808beb 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -498,6 +498,11 @@ def process_images(p: StableDiffusionProcessing) -> Processed: stored_opts = {k: opts.data[k] for k in p.override_settings.keys()} try: + # if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint + if sd_models.checkpoint_alisases.get(p.override_settings.get('sd_model_checkpoint')) is None: + p.override_settings.pop('sd_model_checkpoint', None) + sd_models.reload_model_weights() + for k, v in p.override_settings.items(): setattr(opts, k, v) @@ -514,8 +519,6 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if p.override_settings_restore_afterwards: for k, v in stored_opts.items(): setattr(opts, k, v) - if k == 'sd_model_checkpoint': - sd_models.reload_model_weights() if k == 'sd_vae': sd_vae.reload_vae_weights() -- cgit v1.2.1 From f15b7e52e33de1f0e2fcd99c9d4204ebb0fef536 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 1 May 2023 13:47:46 +0300 Subject: Add a comment and partial fix for the issue when the inpaint UI is unresponsive after using it. --- modules/ui.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index 9ff4bcd9..7b45f131 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -810,7 +810,10 @@ def create_ui(): scale_by.release(**on_change_args) button_update_resize_to.click(**on_change_args) - for component in img2img_image_inputs: + # the code below is meant to update the resolution label after the image in the image selection UI has changed. + # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests. + # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs. 
+ for component in [init_img, sketch]: component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False) tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab]) -- cgit v1.2.1 From b1717c0a4804f8ed3bb8cc2f3aea5d095778b447 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 2 May 2023 09:08:00 +0300 Subject: do not load wait for shared.sd_model to load at startup --- modules/sd_models.py | 54 ++++++++++++++++++++++++++++++++++++++-------------- modules/shared.py | 31 ++++++++++++++++++++++++++---- modules/ui.py | 10 ++++------ 3 files changed, 71 insertions(+), 24 deletions(-) (limited to 'modules') diff --git a/modules/sd_models.py b/modules/sd_models.py index 4f7613a1..59adc7cc 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -2,6 +2,8 @@ import collections import os.path import sys import gc +import threading + import torch import re import safetensors.torch @@ -404,13 +406,39 @@ def repair_config(sd_config): sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight' sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight' -def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_to_load_state_dict=None): + +class SdModelData: + def __init__(self): + self.sd_model = None + self.lock = threading.Lock() + + def get_sd_model(self): + if self.sd_model is None: + with self.lock: + try: + load_model() + except Exception as e: + errors.display(e, "loading stable diffusion model") + print("", file=sys.stderr) + print("Stable diffusion model failed to load", file=sys.stderr) + self.sd_model = None + + return self.sd_model + + def set_sd_model(self, v): + self.sd_model = v + + +model_data = SdModelData() + + +def load_model(checkpoint_info=None, already_loaded_state_dict=None): from modules import lowvram, sd_hijack checkpoint_info = checkpoint_info or select_checkpoint() - if shared.sd_model: - sd_hijack.model_hijack.undo_hijack(shared.sd_model) - shared.sd_model = None + if model_data.sd_model: + sd_hijack.model_hijack.undo_hijack(model_data.sd_model) + model_data.sd_model = None gc.collect() devices.torch_gc() @@ -464,7 +492,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_ timer.record("hijack") sd_model.eval() - shared.sd_model = sd_model + model_data.sd_model = sd_model sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model @@ -484,7 +512,7 @@ def reload_model_weights(sd_model=None, info=None): checkpoint_info = info or select_checkpoint() if not sd_model: - sd_model = shared.sd_model + sd_model = model_data.sd_model if sd_model is None: # previous model load failed current_checkpoint_info = None @@ -512,7 +540,7 @@ def reload_model_weights(sd_model=None, info=None): del sd_model checkpoints_loaded.clear() load_model(checkpoint_info, already_loaded_state_dict=state_dict) - return shared.sd_model + return model_data.sd_model try: load_model_weights(sd_model, checkpoint_info, state_dict, timer) @@ -535,17 +563,15 @@ def reload_model_weights(sd_model=None, info=None): return sd_model + def unload_model_weights(sd_model=None, info=None): from modules import lowvram, devices, sd_hijack timer = Timer() - if shared.sd_model: - - # shared.sd_model.cond_stage_model.to(devices.cpu) - # shared.sd_model.first_stage_model.to(devices.cpu) - 
shared.sd_model.to(devices.cpu) - sd_hijack.model_hijack.undo_hijack(shared.sd_model) - shared.sd_model = None + if model_data.sd_model: + model_data.sd_model.to(devices.cpu) + sd_hijack.model_hijack.undo_hijack(model_data.sd_model) + model_data.sd_model = None sd_model = None gc.collect() devices.torch_gc() diff --git a/modules/shared.py b/modules/shared.py index 6a2b3c2b..151bab9e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -16,6 +16,7 @@ import modules.styles import modules.devices as devices from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir +from ldm.models.diffusion.ddpm import LatentDiffusion demo = None @@ -600,13 +601,37 @@ class Options: return value - opts = Options() if os.path.exists(config_filename): opts.load(config_filename) + +class Shared(sys.modules[__name__].__class__): + """ + this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than + at program startup. + """ + + sd_model_val = None + + @property + def sd_model(self): + import modules.sd_models + + return modules.sd_models.model_data.get_sd_model() + + @sd_model.setter + def sd_model(self, value): + import modules.sd_models + + modules.sd_models.model_data.set_sd_model(value) + + +sd_model: LatentDiffusion = None # this var is here just for IDE's type checking; it cannot be accessed because the class field above will be accessed instead +sys.modules[__name__].__class__ = Shared + settings_components = None -"""assinged from ui.py, a mapping on setting anmes to gradio components repsponsible for those settings""" +"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" latent_upscale_default_mode = "Latent" latent_upscale_modes = { @@ -620,8 +645,6 @@ latent_upscale_modes = { sd_upscalers = [] -sd_model = None - clip_model = None progress_print_out = sys.stdout diff --git a/modules/ui.py b/modules/ui.py index 7b45f131..16c46515 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -828,7 +828,7 @@ def create_ui(): with FormGroup(): with FormRow(): cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") + image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=False) denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") elif category == "seed": @@ -1693,11 +1693,9 @@ def create_ui(): show_progress=info.refresh is not None, ) - text_settings.change( - fn=lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit"), - inputs=[], - outputs=[image_cfg_scale], - ) + update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit") + text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) + demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale]) button_set_checkpoint = gr.Button('Change 
checkpoint', elem_id='change_checkpoint', visible=False) button_set_checkpoint.click( -- cgit v1.2.1 From 5ab7f213bec2f816f9c5644becb32eb72c8ffb89 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 2 May 2023 09:20:35 +0300 Subject: fix an error that prevents running webui on torch<2.0 without --disable-safe-unpickle --- modules/safe.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/safe.py b/modules/safe.py index dadf319c..e6c2f2c0 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -24,7 +24,11 @@ class RestrictedUnpickler(pickle.Unpickler): def persistent_load(self, saved_id): assert saved_id[0] == 'storage' - return TypedStorage(_internal=True) + + try: + return TypedStorage(_internal=True) + except TypeError: + return TypedStorage() # PyTorch before 2.0 does not have the _internal argument def find_class(self, module, name): if self.extra_handler is not None: -- cgit v1.2.1 From 14e55a330146bda01f883a79e3900314a79eb22c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 3 May 2023 14:28:59 +0900 Subject: print PIL.UnidentifiedImageError --- modules/img2img.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/img2img.py b/modules/img2img.py index 56c846d6..9fc3a698 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -48,7 +48,8 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): try: img = Image.open(image) - except UnidentifiedImageError: + except UnidentifiedImageError as e: + print(e) continue # Use the EXIF orientation of photos taken by smartphones. img = ImageOps.exif_transpose(img) -- cgit v1.2.1 From e960781511eb175943be09b314ac2be46b6fc684 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Wed, 3 May 2023 13:12:43 -0500 Subject: fix maximum downsampling option --- modules/generation_parameters_copypaste.py | 4 +++- modules/processing.py | 1 + modules/shared.py | 5 +---- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 34c1b860..83382e93 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -306,6 +306,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Token merging stride x"] = '2' if res.get("Token merging stride y", None) is None: res["Token merging stride y"] = '2' + if res.get("Token merging maximum down sampling", None) is None: + res["Token merging maximum down sampling"] = '1' restore_old_hires_fix_params(res) @@ -341,7 +343,7 @@ infotext_to_setting_name_mapping = [ ('Token merging merge attention', 'token_merging_merge_attention'), ('Token merging merge cross attention', 'token_merging_merge_cross_attention'), ('Token merging merge mlp', 'token_merging_merge_mlp'), - ('Token merging maximum downsampling', 'token_merging_maximum_downsampling'), + ('Token merging maximum down sampling', 'token_merging_maximum_down_sampling'), ('Token merging stride x', 'token_merging_stride_x'), ('Token merging stride y', 'token_merging_stride_y'), ('RNG', 'randn_source'), diff --git a/modules/processing.py b/modules/processing.py index d5d1da5a..6807a301 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -495,6 +495,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Token merging 
merge mlp": None if opts.token_merging_merge_mlp is False else opts.token_merging_merge_mlp, "Token merging stride x": None if opts.token_merging_stride_x == 2 else opts.token_merging_stride_x, "Token merging stride y": None if opts.token_merging_stride_y == 2 else opts.token_merging_stride_y, + "Token merging maximum down sampling": None if opts.token_merging_maximum_down_sampling == 1 else opts.token_merging_maximum_down_sampling, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, diff --git a/modules/shared.py b/modules/shared.py index 7b81ffc9..a7a72dd5 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -488,10 +488,7 @@ options_templates.update(options_section(('token_merging', 'Token Merging'), { False, "Merge mlp", gr.Checkbox ), - "token_merging_maximum_down_sampling": OptionInfo( - 1, "Maximum down sampling", - gr.Dropdown, lambda: {"choices": ["1", "2", "4", "8"]} - ), + "token_merging_maximum_down_sampling": OptionInfo(1, "Maximum down sampling", gr.Radio, lambda: {"choices": ['1', '2', '4', '8']}), "token_merging_stride_x": OptionInfo( 2, "Stride - X", gr.Slider, {"minimum": 2, "maximum": 8, "step": 2} -- cgit v1.2.1 From f0efc8c211fc2d2c2f8caf6e2f92501922d18c99 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Wed, 3 May 2023 21:10:31 -0500 Subject: not being cast properly every time, swap to ints --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index a7a72dd5..eb06909c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -488,7 +488,7 @@ options_templates.update(options_section(('token_merging', 'Token Merging'), { False, "Merge mlp", gr.Checkbox ), - "token_merging_maximum_down_sampling": OptionInfo(1, "Maximum down sampling", gr.Radio, lambda: {"choices": ['1', '2', '4', '8']}), + "token_merging_maximum_down_sampling": OptionInfo(1, "Maximum down sampling", gr.Radio, lambda: {"choices": [1, 2, 4, 8]}), "token_merging_stride_x": OptionInfo( 2, "Stride - X", gr.Slider, {"minimum": 2, "maximum": 8, "step": 2} -- cgit v1.2.1 From 1bebb50da977dfb93d78ae161e0f28d332f408ea Mon Sep 17 00:00:00 2001 From: Acncagua Slt Date: Thu, 4 May 2023 11:59:22 +0900 Subject: No double calls will be made Do not call load_upscalers in list_builtin_upscalers --- modules/modelloader.py | 3 --- 1 file changed, 3 deletions(-) (limited to 'modules') diff --git a/modules/modelloader.py b/modules/modelloader.py index 522affc6..2bfdc1df 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -133,12 +133,9 @@ forbidden_upscaler_classes = set() def list_builtin_upscalers(): - load_upscalers() - builtin_upscaler_classes.clear() builtin_upscaler_classes.extend(Upscaler.__subclasses__()) - def forbid_loaded_nonbuiltin_upscalers(): for cls in Upscaler.__subclasses__(): if cls not in builtin_upscaler_classes: -- cgit v1.2.1 From 29e13867bff5a00388e5eb1dd04c16c6e8f33e4f Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Thu, 4 May 2023 14:08:20 +0800 Subject: Revert "Refresh bug fix" This reverts commit eff00413ae76e01b9a24b13dee6a0dda98f643f2. 
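This revert is the first of three that back the per-image cache buster out of txt2img/img2img; the commits after them re-apply the same trick one layer down, inside ui_tempdir. The trick itself, as an illustrative helper (the function name is hypothetical, not code from the repo): appending a throwaway query string changes the URL the browser sees, so it fetches the image again instead of showing a stale cached copy, while the file on disk keeps its real name.

from time import time


def cache_busted(path: str) -> str:
    # 'outputs/grid.png' becomes e.g. 'outputs/grid.png?1683530000';
    # the query part is ignored when the file is actually served
    return f"{path}?{int(time())}"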
--- modules/img2img.py | 3 +-- modules/txt2img.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/img2img.py b/modules/img2img.py index fea51667..6da974e4 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -181,8 +181,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s print(generation_info_js) for img in processed.images: - if hasattr(img, 'already_saved_as'): - img.already_saved_as += f'?{int(time())}' + img.already_saved_as += f'?{int(time())}' if opts.do_not_show_images: processed.images = [] diff --git a/modules/txt2img.py b/modules/txt2img.py index 57278bdc..6ebca656 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -65,8 +65,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step print(generation_info_js) for img in processed.images: - if hasattr(img, 'already_saved_as'): - img.already_saved_as += f'?{int(time())}' + img.already_saved_as += f'?{int(time())}' if opts.do_not_show_images: processed.images = [] -- cgit v1.2.1 From 35e5916af9ccf1661c6b262b768d4b241ab9eee7 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Thu, 4 May 2023 14:08:21 +0800 Subject: Revert "Add img2img refreshed correctly" This reverts commit 988dd02632bf38e64bc0cf394ece2a5f7a64e15b. --- modules/img2img.py | 4 ---- 1 file changed, 4 deletions(-) (limited to 'modules') diff --git a/modules/img2img.py b/modules/img2img.py index 6da974e4..953ac5d2 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -15,7 +15,6 @@ import modules.processing as processing from modules.ui import plaintext_to_html import modules.images as images import modules.scripts -from time import time def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): @@ -180,9 +179,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s if opts.samples_log_stdout: print(generation_info_js) - for img in processed.images: - img.already_saved_as += f'?{int(time())}' - if opts.do_not_show_images: processed.images = [] -- cgit v1.2.1 From 5c66fedb641841958020c0f8693fc69dee7a96a1 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Thu, 4 May 2023 14:08:22 +0800 Subject: Revert "Fix gallery not being refreshed correctly" This reverts commit 2c24e09dfc431ef7617134a807bf741f0b0c9fa3. 
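This is the last of the three reverts; right after it the timestamp suffix moves into ui_tempdir's save_pil_to_file, gets refined once, and then gains a counterpart fix for reopening images. That counterpart matters because the suffix is not part of the real filename: before the file can be opened from disk again, the query part has to come back off. A small illustrative helper (hypothetical name) showing the rsplit idiom the later "Reopen image fix" commit uses:

def real_filename(served: str) -> str:
    # rsplit with maxsplit=1 strips only the final '?...' query part
    return served.rsplit('?', 1)[0]


# 'outputs/grid.png?1683530000' -> 'outputs/grid.png'
# 'outputs/grid.png'            -> 'outputs/grid.png' (no query, unchanged)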
--- modules/txt2img.py | 4 ---- 1 file changed, 4 deletions(-) (limited to 'modules') diff --git a/modules/txt2img.py b/modules/txt2img.py index 6ebca656..16841d0f 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -7,7 +7,6 @@ from modules.shared import opts, cmd_opts import modules.shared as shared import modules.processing as processing from modules.ui import plaintext_to_html -from time import time def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args): @@ -64,9 +63,6 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step if opts.samples_log_stdout: print(generation_info_js) - for img in processed.images: - img.already_saved_as += f'?{int(time())}' - if opts.do_not_show_images: processed.images = [] -- cgit v1.2.1 From 91a15dca80f44e32461f0ab7219de632781a2622 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Thu, 4 May 2023 14:38:15 +0800 Subject: Use a new way to solve webpage refresh --- modules/ui_tempdir.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 21945235..82b9c4e4 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -2,6 +2,7 @@ import os import tempfile from collections import namedtuple from pathlib import Path +from time import time import gradio as gr @@ -34,7 +35,7 @@ def check_tmp_file(gradio, filename): def save_pil_to_file(pil_image, dir=None): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): - register_tmp_file(shared.demo, already_saved_as) + register_tmp_file(shared.demo, f'{already_saved_as}?{int(time())}') file_obj = Savedfile(already_saved_as) return file_obj -- cgit v1.2.1 From 8bc4a3a2a8d6ad9b439cb90d52600cc50e9b4f42 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Thu, 4 May 2023 15:55:57 +0800 Subject: Refresh fix --- modules/ui_tempdir.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 82b9c4e4..42a85d3b 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -35,7 +35,8 @@ def check_tmp_file(gradio, filename): def save_pil_to_file(pil_image, dir=None): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): - register_tmp_file(shared.demo, f'{already_saved_as}?{int(time())}') + already_saved_as += f'?{int(time())}' + register_tmp_file(shared.demo, already_saved_as) file_obj = Savedfile(already_saved_as) return file_obj -- cgit v1.2.1 From a3cdf9aaf85399d6ddfb5bc3245d8f154802fe58 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Fri, 5 May 2023 15:51:01 +0800 Subject: Reopen image fix --- modules/generation_parameters_copypaste.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/generation_parameters_copypaste.py 
b/modules/generation_parameters_copypaste.py index 6df76858..ae855627 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -59,6 +59,7 @@ def image_from_url_text(filedata): is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename) assert is_in_right_dir, 'trying to open image file outside of allowed directories' + filename = filename.rsplit('?', 1)[0] return Image.open(filename) if type(filedata) == list: -- cgit v1.2.1 From cde0d642f34b2e008159eaeafb870d5efd1a3315 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 6 May 2023 02:20:33 +0900 Subject: add denoising strength filename pattern --- modules/images.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index fd173829..6ceb7c7c 100644 --- a/modules/images.py +++ b/modules/images.py @@ -357,6 +357,7 @@ class FilenameGenerator: 'generation_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.n_iter == 1 and self.p.batch_size == 1 else self.p.iteration * self.p.batch_size + self.p.batch_index + 1, 'hasprompt': lambda self, *args: self.hasprompt(*args), # accepts formats:[hasprompt..] 'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"], + 'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT, } default_time_format = '%Y%m%d%H%M%S' -- cgit v1.2.1 From a46c23b10f972ee235e282e7d79de2e9e7a91d68 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 5 May 2023 22:48:27 -0600 Subject: Make gamepad navigation optional --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 6a2b3c2b..977ff16b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -399,6 +399,7 @@ options_templates.update(options_section(('ui', "User interface"), { "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), + "js_modal_lightbox_gamepad": OptionInfo(True, "Navagete image viewer with gamepad"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"), -- cgit v1.2.1 From 5cbc1c5d438e9ca7384a26eab28a09f44c5162f2 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 5 May 2023 23:03:32 -0600 Subject: Fix spelling --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 977ff16b..b3ca8401 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -399,7 +399,7 @@ options_templates.update(options_section(('ui', "User interface"), { "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), - "js_modal_lightbox_gamepad": OptionInfo(True, "Navagete image viewer with gamepad"), + "js_modal_lightbox_gamepad": OptionInfo(True, "Navigate image viewer with gamepad"), "show_progress_in_title": OptionInfo(True, "Show generation 
progress in window title."), "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"), -- cgit v1.2.1 From 99f3bf07d2976211eed81a9293a447c7ead2d893 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 6 May 2023 22:16:51 -0600 Subject: gamepad repeat option --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index b3ca8401..d8d2bc78 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -400,6 +400,7 @@ options_templates.update(options_section(('ui', "User interface"), { "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), "js_modal_lightbox_gamepad": OptionInfo(True, "Navigate image viewer with gamepad"), + "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"), -- cgit v1.2.1 From 669b518cbd2b9e8f5d7852a89627200f87c649d2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 08:36:24 +0300 Subject: add mtime to served images in gallery to prevent cache from showing old images --- modules/ui_tempdir.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 21945235..9cb4954a 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -36,7 +36,7 @@ def save_pil_to_file(pil_image, dir=None): if already_saved_as and os.path.isfile(already_saved_as): register_tmp_file(shared.demo, already_saved_as) - file_obj = Savedfile(already_saved_as) + file_obj = Savedfile(f"{already_saved_as}?{os.path.getmtime(already_saved_as)}") return file_obj if shared.opts.temp_dir != "": -- cgit v1.2.1 From 083dc3c76ab7dbc7b2b04f3396d4f5280b002906 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 11:33:45 +0300 Subject: directory hiding for extra networks: dirs starting with . will hide their cards on extra network tabs unless specifically searched for create HTML for extra network pages only on demand allow directories starting with . 
to still list their models for lora, checkpoints, etc keep "search" filter for extra networks when user refreshes the page --- modules/modelloader.py | 27 +++++------------ modules/shared.py | 17 +++++++++++ modules/ui_extra_networks.py | 69 +++++++++++++++++++++++++++++++------------- 3 files changed, 74 insertions(+), 39 deletions(-) (limited to 'modules') diff --git a/modules/modelloader.py b/modules/modelloader.py index 522affc6..f2274488 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -22,9 +22,6 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None """ output = [] - if ext_filter is None: - ext_filter = [] - try: places = [] @@ -39,22 +36,14 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None places.append(model_path) for place in places: - if os.path.exists(place): - for file in glob.iglob(place + '**/**', recursive=True): - full_path = file - if os.path.isdir(full_path): - continue - if os.path.islink(full_path) and not os.path.exists(full_path): - print(f"Skipping broken symlink: {full_path}") - continue - if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]): - continue - if len(ext_filter) != 0: - model_name, extension = os.path.splitext(file) - if extension not in ext_filter: - continue - if file not in output: - output.append(full_path) + for full_path in shared.walk_files(place, allowed_extensions=ext_filter): + if os.path.islink(full_path) and not os.path.exists(full_path): + print(f"Skipping broken symlink: {full_path}") + continue + if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]): + continue + if full_path not in output: + output.append(full_path) if model_url is not None and len(output) == 0: if download_name is not None: diff --git a/modules/shared.py b/modules/shared.py index 91aac1a3..dd374713 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -726,3 +726,20 @@ def html(filename): return file.read() return "" + + +def walk_files(path, allowed_extensions=None): + if not os.path.exists(path): + return + + if allowed_extensions is not None: + allowed_extensions = set(allowed_extensions) + + for root, dirs, files in os.walk(path): + for filename in files: + if allowed_extensions is not None: + _, ext = os.path.splitext(filename) + if ext not in allowed_extensions: + continue + + yield os.path.join(root, filename) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index aa2f5d1b..86c05a55 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -89,19 +89,22 @@ class ExtraNetworksPage: subdirs = {} for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]: - for x in glob.glob(os.path.join(parentdir, '**/*'), recursive=True): - if not os.path.isdir(x): - continue + for root, dirs, files in os.walk(parentdir): + for dirname in dirs: + x = os.path.join(root, dirname) - subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/") - while subdir.startswith("/"): - subdir = subdir[1:] + if not os.path.isdir(x): + continue - is_empty = len(os.listdir(x)) == 0 - if not is_empty and not subdir.endswith("/"): - subdir = subdir + "/" + subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/") + while subdir.startswith("/"): + subdir = subdir[1:] - subdirs[subdir] = 1 + is_empty = len(os.listdir(x)) == 0 + if not is_empty and not subdir.endswith("/"): + subdir = subdir + "/" + + subdirs[subdir] = 1 if subdirs: subdirs = {"": 1, **subdirs} @@ -157,8 
+160,20 @@ class ExtraNetworksPage: if metadata: metadata_button = f"" + local_path = "" + filename = item.get("filename", "") + for reldir in self.allowed_directories_for_previews(): + absdir = os.path.abspath(reldir) + + if filename.startswith(absdir): + local_path = filename[len(absdir):] + + # if this is true, the item must not be show in the default view, and must instead only be + # shown when searching for it + serach_only = "/." in local_path or "\\." in local_path + args = { - "style": f"'{height}{width}{background_image}'", + "style": f"'display: none; {height}{width}{background_image}'", "prompt": item.get("prompt", None), "tabname": json.dumps(tabname), "local_preview": json.dumps(item["local_preview"]), @@ -168,6 +183,7 @@ class ExtraNetworksPage: "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {json.dumps(tabname)}, {json.dumps(item["local_preview"])})""") + '"', "search_term": item.get("search_term", ""), "metadata_button": metadata_button, + "serach_only": " search_only" if serach_only else "", } return self.card_page.format(**args) @@ -209,6 +225,11 @@ def intialize(): class ExtraNetworksUi: def __init__(self): self.pages = None + """gradio HTML components related to extra networks' pages""" + + self.page_contents = None + """HTML content of the above; empty initially, filled when extra pages have to be shown""" + self.stored_extra_pages = None self.button_save_preview = None @@ -236,17 +257,22 @@ def pages_in_preferred_order(pages): def create_ui(container, button, tabname): ui = ExtraNetworksUi() ui.pages = [] + ui.pages_contents = [] ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy()) ui.tabname = tabname with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs: for page in ui.stored_extra_pages: - with gr.Tab(page.title, id=page.title.lower().replace(" ", "_")): + page_id = page.title.lower().replace(" ", "_") - page_elem = gr.HTML(page.create_html(ui.tabname)) + with gr.Tab(page.title, id=page_id): + elem_id = f"{tabname}_{page_id}_cards_html" + page_elem = gr.HTML('', elem_id=elem_id) ui.pages.append(page_elem) - filter = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False) + page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + json.dumps(tabname) + '); return []}', inputs=[], outputs=[]) + + gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False) button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh") ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False) @@ -254,19 +280,22 @@ def create_ui(container, button, tabname): def toggle_visibility(is_visible): is_visible = not is_visible - return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary")) + + if is_visible and not ui.pages_contents: + refresh() + + return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary")), *ui.pages_contents state_visible = gr.State(value=False) - button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button]) + button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button, *ui.pages]) def refresh(): - res = [] - for pg in ui.stored_extra_pages: pg.refresh() - res.append(pg.create_html(ui.tabname)) - return res + ui.pages_contents = [pg.create_html(ui.tabname) for pg in 
ui.stored_extra_pages] + + return ui.pages_contents button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages) -- cgit v1.2.1 From 18fb2162a44ae06eaff302845abb880f27bcc975 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 12:17:36 +0300 Subject: disable useless progress display when pasting infotext using the blur button --- modules/generation_parameters_copypaste.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'modules') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 99f1a0d3..6cc8d13b 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -129,6 +129,7 @@ def connect_paste_params_buttons(): _js=jsfunc, inputs=[binding.source_image_component], outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component], + show_progress=False, ) if binding.source_text_component is not None and fields is not None: @@ -140,6 +141,7 @@ def connect_paste_params_buttons(): fn=lambda *x: x, inputs=[field for field, name in paste_fields[binding.source_tabname]["fields"] if name in paste_field_names], outputs=[field for field, name in fields if name in paste_field_names], + show_progress=False, ) binding.paste_button.click( @@ -147,6 +149,7 @@ def connect_paste_params_buttons(): _js=f"switch_to_{binding.tabname}", inputs=None, outputs=None, + show_progress=False, ) @@ -409,12 +412,14 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component, fn=paste_func, inputs=[input_comp], outputs=[x[0] for x in paste_fields], + show_progress=False, ) button.click( fn=None, _js=f"recalculate_prompts_{tabname}", inputs=[], outputs=[], + show_progress=False, ) -- cgit v1.2.1 From f62540b2d2a2692838eb6dfc831c6cf9da12531d Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 12:18:22 +0300 Subject: Revert "add mtime to served images in gallery to prevent cache from showing old images" This reverts commit 669b518cbd2b9e8f5d7852a89627200f87c649d2. 
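This revert and the commit just after it form a pair: the mtime idea from 669b518 is backed out, then immediately re-landed on top of the existing ?timestamp mechanism in ui_tempdir. The interesting part is the choice of cache key; a short comparison sketch (helper names are hypothetical):

import os
from time import time


def key_on_current_time(path: str) -> str:
    # different on every page build, so the browser cache never gets a hit
    return f"{path}?{int(time())}"


def key_on_mtime(path: str) -> str:
    # stable until the file is rewritten, so unchanged images stay cached
    # while a regenerated image still gets a fresh URL
    return f"{path}?{os.path.getmtime(path)}"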
--- modules/ui_tempdir.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 9cb4954a..21945235 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -36,7 +36,7 @@ def save_pil_to_file(pil_image, dir=None): if already_saved_as and os.path.isfile(already_saved_as): register_tmp_file(shared.demo, already_saved_as) - file_obj = Savedfile(f"{already_saved_as}?{os.path.getmtime(already_saved_as)}") + file_obj = Savedfile(already_saved_as) return file_obj if shared.opts.temp_dir != "": -- cgit v1.2.1 From 505a10ad928eb11849828c88850ce3e5e3566fe4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 15:09:20 +0300 Subject: use file modification time instead of current time for #9760 --- modules/ui_tempdir.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 42a85d3b..67bfd1ec 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -35,7 +35,8 @@ def check_tmp_file(gradio, filename): def save_pil_to_file(pil_image, dir=None): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): - already_saved_as += f'?{int(time())}' + already_saved_as += f'?{os.path.getmtime(already_saved_as)}' + register_tmp_file(shared.demo, already_saved_as) file_obj = Savedfile(already_saved_as) -- cgit v1.2.1 From 7aab389d6fc8ad08729071b1ed9d4de64c4e44db Mon Sep 17 00:00:00 2001 From: brkirch Date: Fri, 14 Apr 2023 02:22:48 -0400 Subject: Fix for Unet NaNs --- modules/sd_hijack_optimizations.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'modules') diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index 372555ff..f10865cd 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -256,6 +256,9 @@ def sub_quad_attention_forward(self, x, context=None, mask=None): k = k.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) v = v.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) + if q.device.type == 'mps': + q, k, v = q.contiguous(), k.contiguous(), v.contiguous() + dtype = q.dtype if shared.opts.upcast_attn: q, k = q.float(), k.float() -- cgit v1.2.1 From ab4ab4e595e89d1a9a39db70539d5944fdbe47fa Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 15:23:49 +0300 Subject: add version to infotext, footer and console output when starting --- modules/processing.py | 11 +++++++++++ modules/shared.py | 1 + modules/ui.py | 6 +++--- 3 files changed, 15 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index e8808beb..e786791a 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -458,6 +458,16 @@ def fix_seed(p): p.subseed = get_fixed_seed(p.subseed) +def program_version(): + import launch + + res = launch.git_tag() + if res == "": + res = None + + return res + + def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0): index = position_in_batch + iteration * p.batch_size @@ -483,6 +493,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, + "Version": program_version() if 
opts.add_version_to_infotext else None, } generation_params.update(p.extra_generation_params) diff --git a/modules/shared.py b/modules/shared.py index dd374713..f40faa79 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -394,6 +394,7 @@ options_templates.update(options_section(('ui', "User interface"), { "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), + "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."), "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), diff --git a/modules/ui.py b/modules/ui.py index 16c46515..b2916e9c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1923,7 +1923,7 @@ def versions_html(): python_version = ".".join([str(x) for x in sys.version_info[0:3]]) commit = launch.commit_hash() - short_commit = commit[0:8] + tag = launch.git_tag() if shared.xformers_available: import xformers @@ -1932,6 +1932,8 @@ def versions_html(): xformers_version = "N/A" return f""" +version: {tag} + •  python: {python_version}  •  torch: {getattr(torch, '__long_version__',torch.__version__)} @@ -1940,7 +1942,5 @@ xformers: {xformers_version}  •  gradio: {gr.__version__}  •  -commit: {short_commit} - •  checkpoint: N/A """ -- cgit v1.2.1 From eabea24eb8ba5068c97ff5655bbe01dc032af4e9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 15:26:23 +0300 Subject: put infotext options into their own category in settings tab --- modules/shared.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index f40faa79..e1c3e5c4 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -392,10 +392,6 @@ options_templates.update(options_section(('ui', "User interface"), { "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"), "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), - "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), - "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), - "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), - "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."), "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), "font": OptionInfo("", "Font for image grids that have text"), @@ -417,6 +413,13 @@ options_templates.update(options_section(('ui', "User interface"), { "gradio_theme": OptionInfo("Default", "Gradio theme (requires restart)", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}) })) 
+options_templates.update(options_section(('infotext', "Infotext"), { + "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), + "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), + "add_version_to_infotext": OptionInfo(True, "Add program version to generation information"), + "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."), +})) + options_templates.update(options_section(('ui', "Live previews"), { "show_progressbar": OptionInfo(True, "Show progressbar"), "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), -- cgit v1.2.1 From fc966c029943ce37aaecd620645c770478395afc Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 15:30:32 +0300 Subject: do not show licenses page when user selects Show all pages in settings --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index b2916e9c..39efd576 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1566,7 +1566,7 @@ def create_ui(): current_row.__exit__() current_tab.__exit__() - with gr.TabItem("Actions", id="actions"): + with gr.TabItem("Actions", id="actions", elem_id="settings_tab_actions"): request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications") download_localization = gr.Button(value='Download localization template', elem_id="download_localization") reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") @@ -1574,7 +1574,7 @@ def create_ui(): unload_sd_model = gr.Button(value='Unload SD checkpoint to free VRAM', elem_id="sett_unload_sd_model") reload_sd_model = gr.Button(value='Reload the last SD checkpoint back into VRAM', elem_id="sett_reload_sd_model") - with gr.TabItem("Licenses", id="licenses"): + with gr.TabItem("Licenses", id="licenses", elem_id="settings_tab_licenses"): gr.HTML(shared.html("licenses.html"), elem_id="licenses") gr.Button(value="Show all pages", elem_id="settings_show_all_pages") -- cgit v1.2.1 From 5edb0acfeb424f71954b111910d2e08c410b0c43 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 15:38:25 +0300 Subject: use multiselect for quicksettings (this also resets the existing setting) --- modules/shared.py | 4 ++-- modules/ui.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index e1c3e5c4..a8154580 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -404,8 +404,8 @@ options_templates.update(options_section(('ui', "User interface"), { "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"), "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), - "keyedit_delimiters": OptionInfo(".,\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), - "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"), + "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), + 
"quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}), "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": [x for x in tab_names]}), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), diff --git a/modules/ui.py b/modules/ui.py index 39efd576..883d37e7 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1525,7 +1525,7 @@ def create_ui(): result = gr.HTML(elem_id="settings_result") - quicksettings_names = [x.strip() for x in opts.quicksettings.split(",")] + quicksettings_names = opts.quicksettings_list quicksettings_names = {x: i for i, x in enumerate(quicksettings_names) if x != 'quicksettings'} quicksettings_list = [] -- cgit v1.2.1 From 2b96a7b694d3392f76940dfe5df895a2833400fb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 8 May 2023 16:46:35 +0300 Subject: add links to wiki for filename pattern settings add extended info for quicksettings setting --- modules/ui.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index 883d37e7..842c57f7 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1944,3 +1944,17 @@ gradio: {gr.__version__}  •  checkpoint: N/A """ + + +def setup_ui_api(app): + from pydantic import BaseModel, Field + from typing import List + + class QuicksettingsHint(BaseModel): + name: str = Field(title="Name of the quicksettings field") + label: str = Field(title="Label of the quicksettings field") + + def quicksettings_hint(): + return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] + + app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) -- cgit v1.2.1 From 9efb809f7c2f6754367cafcce02926bf954815d5 Mon Sep 17 00:00:00 2001 From: brkirch Date: Mon, 8 May 2023 15:49:43 -0400 Subject: Remove PyTorch 2.0 check Apparently the commit in the main branch of pytorch/pytorch that fixes this issue didn't make it into PyTorch 2.0.1, and since it is unclear exactly which release will have it we'll just always apply the workaround so a crash doesn't occur regardless. 
--- modules/mac_specific.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 6fe8dea0..68bffec6 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -54,6 +54,6 @@ if has_mps: CondFunc('torch.cumsum', cumsum_fix_func, None) CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None) CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None) - if version.parse(torch.__version__) == version.parse("2.0"): + # MPS workaround for https://github.com/pytorch/pytorch/issues/96113 - CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda *args, **kwargs: len(args) == 6) + CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda _, input, *args, **kwargs: len(args) == 4 and input.device.type == 'mps') -- cgit v1.2.1 From de401d8ffb46515a7cb4749f564d6a23085b4a5e Mon Sep 17 00:00:00 2001 From: brkirch Date: Mon, 8 May 2023 16:32:40 -0400 Subject: Fix generation with k-diffusion/UniPC on x64 Macs --- modules/mac_specific.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'modules') diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 68bffec6..40ce2101 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -57,3 +57,8 @@ if has_mps: # MPS workaround for https://github.com/pytorch/pytorch/issues/96113 CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda _, input, *args, **kwargs: len(args) == 4 and input.device.type == 'mps') + + # MPS workaround for https://github.com/pytorch/pytorch/issues/92311 + if platform.processor() == 'i386': + for funcName in ['torch.argmax', 'torch.Tensor.argmax']: + CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps') \ No newline at end of file -- cgit v1.2.1 From ad6ec0226118b80e79446f16747976a1dd1fabcd Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 9 May 2023 11:42:47 +0300 Subject: prevent Reload UI button/link from reloading the page when it's not yet ready --- modules/ui.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index 842c57f7..34b2aaff 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1958,3 +1958,5 @@ def setup_ui_api(app): return [QuicksettingsHint(name=k, label=v.label) for k, v in opts.data_labels.items()] app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint]) + + app.add_api_route("/internal/ping", lambda: {}, methods=["GET"]) -- cgit v1.2.1 From d1ff57e1cb602a4ebac80a25b8e3ce2424278f94 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 9 May 2023 18:14:12 +0900 Subject: 1.1.1 quicksettings list migration 
--- modules/shared.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index a8154580..090707ca 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -549,6 +549,10 @@ class Options: with open(filename, "r", encoding="utf8") as file: self.data = json.load(file) + # 1.1.1 quicksettings list migration + if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None: + self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')] + bad_settings = 0 for k, v in self.data.items(): info = self.data_labels.get(k, None) -- cgit v1.2.1 From e7dbefc3408cc01bda613018a6c1e1364c80b63e Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Tue, 9 May 2023 19:06:00 +0800 Subject: refresh fix --- modules/ui_tempdir.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 67bfd1ec..46fa9cb0 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -2,7 +2,6 @@ import os import tempfile from collections import namedtuple from pathlib import Path -from time import time import gradio as gr @@ -35,11 +34,9 @@ def check_tmp_file(gradio, filename): def save_pil_to_file(pil_image, dir=None): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): - already_saved_as += f'?{os.path.getmtime(already_saved_as)}' - register_tmp_file(shared.demo, already_saved_as) - file_obj = Savedfile(already_saved_as) + file_obj = Savedfile(f'{already_saved_as}?{os.path.getmtime(already_saved_as)}') return file_obj if shared.opts.temp_dir != "": -- cgit v1.2.1 From 7fd3a4e6d7b1c70461eed0c8a7dc4f2412cdaf1c Mon Sep 17 00:00:00 2001 From: Micky Brunetti Date: Tue, 9 May 2023 15:35:57 +0200 Subject: files in vae folder with same name as a checkpoint can be found too --- modules/sd_vae.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 9b00f76e..4d2026e1 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -88,10 +88,13 @@ def refresh_vae_list(): def find_vae_near_checkpoint(checkpoint_file): - checkpoint_path = os.path.splitext(checkpoint_file)[0] - for vae_location in [checkpoint_path + ".vae.pt", checkpoint_path + ".vae.ckpt", checkpoint_path + ".vae.safetensors"]: - if os.path.isfile(vae_location): - return vae_location + checkpoint_path = os.path.basename(checkpoint_file).split('.', 1)[0] + print(f"checkpoint: {checkpoint_path}") + for vae_file in vae_dict.values(): + vae_path = os.path.basename(vae_file).split('.', 1)[0] + print(f"vae: {vae_path}") + if vae_path == checkpoint_path: + return vae_file return None -- cgit v1.2.1 From 749a93295e5259fbba2e2a849cde5a37c67aa69f Mon Sep 17 00:00:00 2001 From: Micky Brunetti Date: Tue, 9 May 2023 15:43:58 +0200 Subject: remove logs --- modules/sd_vae.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'modules') diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 4d2026e1..17d1f702 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -89,10 +89,8 @@ def refresh_vae_list(): def find_vae_near_checkpoint(checkpoint_file): checkpoint_path = os.path.basename(checkpoint_file).split('.', 1)[0] - print(f"checkpoint: {checkpoint_path}") for vae_file in vae_dict.values(): vae_path = os.path.basename(vae_file).split('.', 1)[0] - print(f"vae: {vae_path}") 
if vae_path == checkpoint_path: return vae_file -- cgit v1.2.1 From 3ba6c3c83c0983a025c7bddc08bb7f49481b3cbb Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Tue, 9 May 2023 22:17:58 +0300 Subject: Fix up string formatting/concatenation to f-strings where feasible --- modules/api/api.py | 22 ++++++------ modules/call_queue.py | 5 +-- modules/esrgan_model.py | 11 +++--- modules/esrgan_model_arch.py | 16 ++++----- modules/extra_networks_hypernet.py | 3 +- modules/generation_parameters_copypaste.py | 4 +-- modules/hashes.py | 4 +-- modules/images.py | 8 ++--- modules/interrogate.py | 4 +-- modules/models/diffusion/ddpm_edit.py | 4 +-- modules/models/diffusion/uni_pc/uni_pc.py | 4 +-- modules/ngrok.py | 4 +-- modules/paths.py | 2 +- modules/processing.py | 13 ++++++-- modules/progress.py | 3 +- modules/realesrgan_model.py | 8 ++--- modules/scripts.py | 5 +-- modules/sd_hijack_clip_old.py | 3 +- modules/sd_hijack_unet.py | 2 +- modules/sd_models.py | 4 +-- modules/sd_models_config.py | 2 +- modules/sd_samplers_kdiffusion.py | 2 +- modules/sd_vae.py | 2 +- modules/styles.py | 2 +- modules/textual_inversion/autocrop.py | 6 ++-- modules/textual_inversion/dataset.py | 2 +- modules/textual_inversion/preprocess.py | 6 ++-- modules/textual_inversion/textual_inversion.py | 12 +++---- modules/ui.py | 46 +++++++++++++------------- modules/ui_extensions.py | 3 +- modules/ui_extra_networks.py | 4 ++- 31 files changed, 118 insertions(+), 98 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index cdbdce32..9bb95dfd 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -570,20 +570,20 @@ class Api: filename = create_embedding(**args) # create empty embedding sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used shared.state.end() - return CreateResponse(info = "create embedding filename: {filename}".format(filename = filename)) + return CreateResponse(info=f"create embedding filename: {filename}") except AssertionError as e: shared.state.end() - return TrainResponse(info = "create embedding error: {error}".format(error = e)) + return TrainResponse(info=f"create embedding error: {e}") def create_hypernetwork(self, args: dict): try: shared.state.begin() filename = create_hypernetwork(**args) # create empty embedding shared.state.end() - return CreateResponse(info = "create hypernetwork filename: {filename}".format(filename = filename)) + return CreateResponse(info=f"create hypernetwork filename: {filename}") except AssertionError as e: shared.state.end() - return TrainResponse(info = "create hypernetwork error: {error}".format(error = e)) + return TrainResponse(info=f"create hypernetwork error: {e}") def preprocess(self, args: dict): try: @@ -593,13 +593,13 @@ class Api: return PreprocessResponse(info = 'preprocess complete') except KeyError as e: shared.state.end() - return PreprocessResponse(info = "preprocess error: invalid token: {error}".format(error = e)) + return PreprocessResponse(info=f"preprocess error: invalid token: {e}") except AssertionError as e: shared.state.end() - return PreprocessResponse(info = "preprocess error: {error}".format(error = e)) + return PreprocessResponse(info=f"preprocess error: {e}") except FileNotFoundError as e: shared.state.end() - return PreprocessResponse(info = 'preprocess error: {error}'.format(error = e)) + return PreprocessResponse(info=f'preprocess error: {e}') def train_embedding(self, args: dict): try: @@ -617,10 +617,10 @@ class Api: if not 
apply_optimizations: sd_hijack.apply_optimizations() shared.state.end() - return TrainResponse(info = "train embedding complete: filename: {filename} error: {error}".format(filename = filename, error = error)) + return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") except AssertionError as msg: shared.state.end() - return TrainResponse(info = "train embedding error: {msg}".format(msg = msg)) + return TrainResponse(info=f"train embedding error: {msg}") def train_hypernetwork(self, args: dict): try: @@ -641,10 +641,10 @@ class Api: if not apply_optimizations: sd_hijack.apply_optimizations() shared.state.end() - return TrainResponse(info="train embedding complete: filename: {filename} error: {error}".format(filename=filename, error=error)) + return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") except AssertionError as msg: shared.state.end() - return TrainResponse(info="train embedding error: {error}".format(error=error)) + return TrainResponse(info=f"train embedding error: {error}") def get_memory(self): try: diff --git a/modules/call_queue.py b/modules/call_queue.py index 1829f3a6..447bb764 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -60,7 +60,7 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False): max_debug_str_len = 131072 # (1024*1024)/8 print("Error completing request", file=sys.stderr) - argStr = f"Arguments: {str(args)} {str(kwargs)}" + argStr = f"Arguments: {args} {kwargs}" print(argStr[:max_debug_str_len], file=sys.stderr) if len(argStr) > max_debug_str_len: print(f"(Argument list truncated at {max_debug_str_len}/{len(argStr)} characters)", file=sys.stderr) @@ -73,7 +73,8 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False): if extra_outputs_array is None: extra_outputs_array = [None, ''] - res = extra_outputs_array + [f"
{html.escape(type(e).__name__+': '+str(e))}
"] + error_message = f'{type(e).__name__}: {e}' + res = extra_outputs_array + [f"
{html.escape(error_message)}
"] shared.state.skipped = False shared.state.interrupted = False diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index 9a9c38f1..f4369257 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -156,13 +156,16 @@ class UpscalerESRGAN(Upscaler): def load_model(self, path: str): if "http" in path: - filename = load_file_from_url(url=self.model_url, model_dir=self.model_path, - file_name="%s.pth" % self.model_name, - progress=True) + filename = load_file_from_url( + url=self.model_url, + model_dir=self.model_path, + file_name=f"{self.model_name}.pth", + progress=True, + ) else: filename = path if not os.path.exists(filename) or filename is None: - print("Unable to load %s from %s" % (self.model_path, filename)) + print(f"Unable to load {self.model_path} from {filename}") return None state_dict = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None) diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 1b52b0f5..6071fea7 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -38,7 +38,7 @@ class RRDBNet(nn.Module): elif upsample_mode == 'pixelshuffle': upsample_block = pixelshuffle_block else: - raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode)) + raise NotImplementedError(f'upsample mode [{upsample_mode}] is not found') if upscale == 3: upsampler = upsample_block(nf, nf, 3, act_type=act_type, convtype=convtype) else: @@ -261,10 +261,10 @@ class Upsample(nn.Module): def extra_repr(self): if self.scale_factor is not None: - info = 'scale_factor=' + str(self.scale_factor) + info = f'scale_factor={self.scale_factor}' else: - info = 'size=' + str(self.size) - info += ', mode=' + self.mode + info = f'size={self.size}' + info += f', mode={self.mode}' return info @@ -350,7 +350,7 @@ def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1, beta=1.0): elif act_type == 'sigmoid': # [0, 1] range output layer = nn.Sigmoid() else: - raise NotImplementedError('activation layer [{:s}] is not found'.format(act_type)) + raise NotImplementedError(f'activation layer [{act_type}] is not found') return layer @@ -372,7 +372,7 @@ def norm(norm_type, nc): elif norm_type == 'none': def norm_layer(x): return Identity() else: - raise NotImplementedError('normalization layer [{:s}] is not found'.format(norm_type)) + raise NotImplementedError(f'normalization layer [{norm_type}] is not found') return layer @@ -388,7 +388,7 @@ def pad(pad_type, padding): elif pad_type == 'zero': layer = nn.ZeroPad2d(padding) else: - raise NotImplementedError('padding layer [{:s}] is not implemented'.format(pad_type)) + raise NotImplementedError(f'padding layer [{pad_type}] is not implemented') return layer @@ -432,7 +432,7 @@ def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias= pad_type='zero', norm_type=None, act_type='relu', mode='CNA', convtype='Conv2D', spectral_norm=False): """ Conv layer with padding, normalization, activation """ - assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [{:s}]'.format(mode) + assert mode in ['CNA', 'NAC', 'CNAC'], f'Wrong conv mode [{mode}]' padding = get_valid_padding(kernel_size, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py index 33d100dd..04f27c9f 100644 --- a/modules/extra_networks_hypernet.py +++ b/modules/extra_networks_hypernet.py @@ -10,7 +10,8 @@ 
class ExtraNetworkHypernet(extra_networks.ExtraNetwork): additional = shared.opts.sd_hypernetwork if additional != "None" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0: - p.all_prompts = [x + f"" for x in p.all_prompts] + hypernet_prompt_text = f"" + p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts] params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) names = [] diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 78248ed2..fe8b18b2 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -269,8 +269,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model v = v[1:-1] if v[0] == '"' and v[-1] == '"' else v m = re_imagesize.match(v) if m is not None: - res[k+"-1"] = m.group(1) - res[k+"-2"] = m.group(2) + res[f"{k}-1"] = m.group(1) + res[f"{k}-2"] = m.group(2) else: res[k] = v diff --git a/modules/hashes.py b/modules/hashes.py index 83272a07..032120f4 100644 --- a/modules/hashes.py +++ b/modules/hashes.py @@ -13,7 +13,7 @@ cache_data = None def dump_cache(): - with filelock.FileLock(cache_filename+".lock"): + with filelock.FileLock(f"{cache_filename}.lock"): with open(cache_filename, "w", encoding="utf8") as file: json.dump(cache_data, file, indent=4) @@ -22,7 +22,7 @@ def cache(subsection): global cache_data if cache_data is None: - with filelock.FileLock(cache_filename+".lock"): + with filelock.FileLock(f"{cache_filename}.lock"): if not os.path.isfile(cache_filename): cache_data = {} else: diff --git a/modules/images.py b/modules/images.py index 6ceb7c7c..a41965ab 100644 --- a/modules/images.py +++ b/modules/images.py @@ -467,7 +467,7 @@ def get_next_sequence_number(path, basename): """ result = -1 if basename != '': - basename = basename + "-" + basename = f"{basename}-" prefix_length = len(basename) for p in os.listdir(path): @@ -536,7 +536,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i add_number = opts.save_images_add_number or file_decoration == '' if file_decoration != "" and add_number: - file_decoration = "-" + file_decoration + file_decoration = f"-{file_decoration}" file_decoration = namegen.apply(file_decoration) + suffix @@ -566,7 +566,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i def _atomically_save_image(image_to_save, filename_without_extension, extension): # save image with .tmp extension to avoid race condition when another process detects new image in the directory - temp_file_path = filename_without_extension + ".tmp" + temp_file_path = f"{filename_without_extension}.tmp" image_format = Image.registered_extensions()[extension] if extension.lower() == '.png': @@ -626,7 +626,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i if opts.save_txt and info is not None: txt_fullfn = f"{fullfn_without_extension}.txt" with open(txt_fullfn, "w", encoding="utf8") as file: - file.write(info + "\n") + file.write(f"{info}\n") else: txt_fullfn = None diff --git a/modules/interrogate.py b/modules/interrogate.py index e1665708..9f7d657f 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -28,7 +28,7 @@ def category_types(): def download_default_clip_interrogate_categories(content_dir): print("Downloading CLIP categories...") - tmpdir = content_dir + "_tmp" + tmpdir = 
f"{content_dir}_tmp" category_types = ["artists", "flavors", "mediums", "movements"] try: @@ -214,7 +214,7 @@ class InterrogateModels: if shared.opts.interrogate_return_ranks: res += f", ({match}:{score/100:.3f})" else: - res += ", " + match + res += f", {match}" except Exception: print("Error interrogating", file=sys.stderr) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index f3d49c44..f880bc3c 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -223,7 +223,7 @@ class DDPM(pl.LightningModule): for k in keys: for ik in ignore_keys: if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) + print(f"Deleting key {k} from state_dict.") del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) @@ -386,7 +386,7 @@ class DDPM(pl.LightningModule): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) - loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} + loss_dict_ema = {f"{key}_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index eb5f4e76..11b330bc 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -94,7 +94,7 @@ class NoiseScheduleVP: """ if schedule not in ['discrete', 'linear', 'cosine']: - raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule)) + raise ValueError(f"Unsupported noise schedule {schedule}. The schedule needs to be 'discrete' or 'linear' or 'cosine'") self.schedule = schedule if schedule == 'discrete': @@ -469,7 +469,7 @@ class UniPC: t = torch.linspace(t_T**(1. / t_order), t_0**(1. 
/ t_order), N + 1).pow(t_order).to(device) return t else: - raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) + raise ValueError(f"Unsupported skip_type {skip_type}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'") def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): """ diff --git a/modules/ngrok.py b/modules/ngrok.py index 1ad7989b..7a7b4b26 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -7,8 +7,8 @@ def connect(token, port, region): else: if ':' in token: # token = authtoken:username:password - account = token.split(':')[1] + ':' + token.split(':')[-1] - token = token.split(':')[0] + token, username, password = token.split(':', 2) + account = f"{username}:{password}" config = conf.PyngrokConfig( auth_token=token, region=region diff --git a/modules/paths.py b/modules/paths.py index 0e1e00e7..acf1894b 100644 --- a/modules/paths.py +++ b/modules/paths.py @@ -16,7 +16,7 @@ for possible_sd_path in possible_sd_paths: sd_path = os.path.abspath(possible_sd_path) break -assert sd_path is not None, "Couldn't find Stable Diffusion in any of: " + str(possible_sd_paths) +assert sd_path is not None, f"Couldn't find Stable Diffusion in any of: {possible_sd_paths}" path_dirs = [ (sd_path, 'ldm', 'Stable Diffusion', []), diff --git a/modules/processing.py b/modules/processing.py index e786791a..1a76e552 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -500,7 +500,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) - negative_prompt_text = "\nNegative prompt: " + p.all_negative_prompts[index] if p.all_negative_prompts[index] else "" + negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else "" return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip() @@ -780,7 +780,16 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() - res = Processed(p, output_images, p.all_seeds[0], infotext(), comments="".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts) + res = Processed( + p, + images_list=output_images, + seed=p.all_seeds[0], + info=infotext(), + comments="".join(f"\n\n{comment}" for comment in comments), + subseed=p.all_subseeds[0], + index_of_first_image=index_of_first_image, + infotexts=infotexts, + ) if p.scripts is not None: p.scripts.postprocess(p, res) diff --git a/modules/progress.py b/modules/progress.py index 5655346b..948e6f00 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -96,7 +96,8 @@ def progressapi(req: ProgressRequest): if image is not None: buffered = io.BytesIO() image.save(buffered, format="png") - live_preview = 'data:image/png;base64,' + base64.b64encode(buffered.getvalue()).decode("ascii") + base64_image = base64.b64encode(buffered.getvalue()).decode('ascii') + live_preview = f"data:image/png;base64,{base64_image}" id_live_preview = shared.state.id_live_preview else: live_preview = None diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index d6079433..efd7fca5 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -28,9 +28,9 @@ class UpscalerRealESRGAN(Upscaler): for scaler in scalers: 
if scaler.local_data_path.startswith("http"): filename = modelloader.friendly_name(scaler.local_data_path) - local = next(iter([local_model for local_model in local_model_paths if local_model.endswith(filename + '.pth')]), None) - if local: - scaler.local_data_path = local + local_model_candidates = [local_model for local_model in local_model_paths if local_model.endswith(f"{filename}.pth")] + if local_model_candidates: + scaler.local_data_path = local_model_candidates[0] if scaler.name in opts.realesrgan_enabled_models: self.scalers.append(scaler) @@ -47,7 +47,7 @@ class UpscalerRealESRGAN(Upscaler): info = self.load_model(path) if not os.path.exists(info.local_data_path): - print("Unable to load RealESRGAN model: %s" % info.name) + print(f"Unable to load RealESRGAN model: {info.name}") return img upsampler = RealESRGANer( diff --git a/modules/scripts.py b/modules/scripts.py index 4d0bbd66..d945b89f 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -163,7 +163,8 @@ class Script: """helper function to generate id for a HTML element, constructs final id out of script name, tab and user-supplied item_id""" need_tabname = self.show(True) == self.show(False) - tabname = ('img2img' if self.is_img2img else 'txt2txt') + "_" if need_tabname else "" + tabkind = 'img2img' if self.is_img2img else 'txt2txt' + tabname = f"{tabkind}_" if need_tabname else "" title = re.sub(r'[^a-z_0-9]', '', re.sub(r'\s', '_', self.title().lower())) return f'script_{tabname}{title}_{item_id}' @@ -526,7 +527,7 @@ def add_classes_to_gradio_component(comp): this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others """ - comp.elem_classes = ["gradio-" + comp.get_block_name(), *(comp.elem_classes or [])] + comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])] if getattr(comp, 'multiselect', False): comp.elem_classes.append('multiselect') diff --git a/modules/sd_hijack_clip_old.py b/modules/sd_hijack_clip_old.py index 6d9fbbe6..a3476e95 100644 --- a/modules/sd_hijack_clip_old.py +++ b/modules/sd_hijack_clip_old.py @@ -75,7 +75,8 @@ def forward_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, text self.hijack.comments += hijack_comments if len(used_custom_terms) > 0: - self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms])) + embedding_names = ", ".join(f"{word} [{checksum}]" for word, checksum in used_custom_terms) + self.hijack.comments.append(f"Used embeddings: {embedding_names}") self.hijack.fixes = hijack_fixes return self.process_tokens(remade_batch_tokens, batch_multipliers) diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py index 15858263..ca1daf45 100644 --- a/modules/sd_hijack_unet.py +++ b/modules/sd_hijack_unet.py @@ -18,7 +18,7 @@ class TorchHijackForUnet: if hasattr(torch, item): return getattr(torch, item) - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item)) + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") def cat(self, tensors, *args, **kwargs): if len(tensors) == 2: diff --git a/modules/sd_models.py b/modules/sd_models.py index 59adc7cc..36f643e1 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -47,7 +47,7 @@ class CheckpointInfo: self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0] self.hash = model_hash(filename) - self.sha256 = hashes.sha256_from_cache(self.filename, "checkpoint/" + name) + self.sha256 
= hashes.sha256_from_cache(self.filename, f"checkpoint/{name}") self.shorthash = self.sha256[0:10] if self.sha256 else None self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]' @@ -69,7 +69,7 @@ class CheckpointInfo: checkpoint_alisases[id] = self def calculate_shorthash(self): - self.sha256 = hashes.sha256(self.filename, "checkpoint/" + self.name) + self.sha256 = hashes.sha256(self.filename, f"checkpoint/{self.name}") if self.sha256 is None: return diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 9398f528..7a79925a 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -111,7 +111,7 @@ def find_checkpoint_config_near_filename(info): if info is None: return None - config = os.path.splitext(info.filename)[0] + ".yaml" + config = f"{os.path.splitext(info.filename)[0]}.yaml" if os.path.exists(config): return config diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index eb98e599..0fc9f456 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -198,7 +198,7 @@ class TorchHijack: if hasattr(torch, item): return getattr(torch, item) - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item)) + raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'") def randn_like(self, x): if self.sampler_noises: diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 9b00f76e..521e485a 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -89,7 +89,7 @@ def refresh_vae_list(): def find_vae_near_checkpoint(checkpoint_file): checkpoint_path = os.path.splitext(checkpoint_file)[0] - for vae_location in [checkpoint_path + ".vae.pt", checkpoint_path + ".vae.ckpt", checkpoint_path + ".vae.safetensors"]: + for vae_location in [f"{checkpoint_path}.vae.pt", f"{checkpoint_path}.vae.ckpt", f"{checkpoint_path}.vae.safetensors"]: if os.path.isfile(vae_location): return vae_location diff --git a/modules/styles.py b/modules/styles.py index 9ed85991..11642075 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -74,7 +74,7 @@ class StyleDatabase: def save_styles(self, path: str) -> None: # Always keep a backup file around if os.path.exists(path): - shutil.copy(path, path + ".bak") + shutil.copy(path, f"{path}.bak") fd = os.open(path, os.O_RDWR|os.O_CREAT) with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file: diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index 68e1103c..ba1bdcd4 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -111,7 +111,7 @@ def focal_point(im, settings): if corner_centroid is not None: color = BLUE box = corner_centroid.bounding(max_size * corner_centroid.weight) - d.text((box[0], box[1]-15), "Edge: %.02f" % corner_centroid.weight, fill=color) + d.text((box[0], box[1]-15), f"Edge: {corner_centroid.weight:.02f}", fill=color) d.ellipse(box, outline=color) if len(corner_points) > 1: for f in corner_points: @@ -119,7 +119,7 @@ def focal_point(im, settings): if entropy_centroid is not None: color = "#ff0" box = entropy_centroid.bounding(max_size * entropy_centroid.weight) - d.text((box[0], box[1]-15), "Entropy: %.02f" % entropy_centroid.weight, fill=color) + d.text((box[0], box[1]-15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color) d.ellipse(box, outline=color) if len(entropy_points) > 1: for f in entropy_points: @@ -127,7 +127,7 @@ def focal_point(im, settings): if face_centroid is not 
None: color = RED box = face_centroid.bounding(max_size * face_centroid.weight) - d.text((box[0], box[1]-15), "Face: %.02f" % face_centroid.weight, fill=color) + d.text((box[0], box[1]-15), f"Face: {face_centroid.weight:.02f}", fill=color) d.ellipse(box, outline=color) if len(face_points) > 1: for f in face_points: diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index af9fbcf2..41610e03 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -72,7 +72,7 @@ class PersonalizedBase(Dataset): except Exception: continue - text_filename = os.path.splitext(path)[0] + ".txt" + text_filename = f"{os.path.splitext(path)[0]}.txt" filename = os.path.basename(path) if os.path.exists(text_filename): diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 4a29151d..da0bcb26 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -63,9 +63,9 @@ def save_pic_with_caption(image, index, params: PreprocessParams, existing_capti image.save(os.path.join(params.dstdir, f"{basename}.png")) if params.preprocess_txt_action == 'prepend' and existing_caption: - caption = existing_caption + ' ' + caption + caption = f"{existing_caption} {caption}" elif params.preprocess_txt_action == 'append' and existing_caption: - caption = caption + ' ' + existing_caption + caption = f"{caption} {existing_caption}" elif params.preprocess_txt_action == 'copy' and existing_caption: caption = existing_caption @@ -174,7 +174,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre params.src = filename existing_caption = None - existing_caption_filename = os.path.splitext(filename)[0] + '.txt' + existing_caption_filename = f"{os.path.splitext(filename)[0]}.txt" if os.path.exists(existing_caption_filename): with open(existing_caption_filename, 'r', encoding="utf8") as file: existing_caption = file.read() diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 379df243..4368eb63 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -69,7 +69,7 @@ class Embedding: 'hash': self.checksum(), 'optimizer_state_dict': self.optimizer_state_dict, } - torch.save(optimizer_saved_dict, filename + '.optim') + torch.save(optimizer_saved_dict, f"{filename}.optim") def checksum(self): if self.cached_checksum is not None: @@ -437,8 +437,8 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0) if shared.opts.save_optimizer_state: optimizer_state_dict = None - if os.path.exists(filename + '.optim'): - optimizer_saved_dict = torch.load(filename + '.optim', map_location='cpu') + if os.path.exists(f"{filename}.optim"): + optimizer_saved_dict = torch.load(f"{filename}.optim", map_location='cpu') if embedding.checksum() == optimizer_saved_dict.get('hash', None): optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None) @@ -599,7 +599,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st data = torch.load(last_saved_file) info.add_text("sd-ti-embedding", embedding_to_b64(data)) - title = "<{}>".format(data.get('name', '???')) + title = f"<{data.get('name', '???')}>" try: vectorSize = list(data['string_to_param'].values())[0].shape[0] @@ -608,8 +608,8 @@ def 
train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st checkpoint = sd_models.select_checkpoint() footer_left = checkpoint.model_name - footer_mid = '[{}]'.format(checkpoint.shorthash) - footer_right = '{}v {}s'.format(vectorSize, steps_done) + footer_mid = f'[{checkpoint.shorthash}]' + footer_right = f'{vectorSize}v {steps_done}s' captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right) captioned_image = insert_image_data_embed(captioned_image, data) diff --git a/modules/ui.py b/modules/ui.py index 34b2aaff..d02f6e82 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -101,7 +101,7 @@ def visit(x, func, path=""): for c in x.children: visit(c, func, path) elif x.label is not None: - func(path + "/" + str(x.label), x) + func(f"{path}/{x.label}", x) def add_style(name: str, prompt: str, negative_prompt: str): @@ -166,7 +166,7 @@ def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_di img = Image.open(image) filename = os.path.basename(image) left, _ = os.path.splitext(filename) - print(interrogation_function(img), file=open(os.path.join(ii_output_dir, left + ".txt"), 'a')) + print(interrogation_function(img), file=open(os.path.join(ii_output_dir, f"{left}.txt"), 'a')) return [gr.update(), None] @@ -182,29 +182,29 @@ def interrogate_deepbooru(image): def create_seed_inputs(target_interface): - with FormRow(elem_id=target_interface + '_seed_row', variant="compact"): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed') + with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"): + seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed") seed.style(container=False) - random_seed = ToolButton(random_symbol, elem_id=target_interface + '_random_seed', label='Random seed') - reuse_seed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_seed', label='Reuse seed') + random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed') + reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed') - seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False) + seed_checkbox = gr.Checkbox(label='Extra', elem_id=f"{target_interface}_subseed_show", value=False) # Components to show/hide based on the 'Extra' checkbox seed_extras = [] - with FormRow(visible=False, elem_id=target_interface + '_subseed_row') as seed_extra_row_1: + with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1: seed_extras.append(seed_extra_row_1) - subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed') + subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed") subseed.style(container=False) - random_subseed = ToolButton(random_symbol, elem_id=target_interface + '_random_subseed') - reuse_subseed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_subseed') - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength') + random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed") + reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed") + subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, 
maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength") with FormRow(visible=False) as seed_extra_row_2: seed_extras.append(seed_extra_row_2) - seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w') - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h') + seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=f"{target_interface}_seed_resize_from_w") + seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=f"{target_interface}_seed_resize_from_h") random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed]) random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed]) @@ -765,7 +765,7 @@ def create_ui(): ) button.click( fn=lambda: None, - _js="switch_to_"+name.replace(" ", "_"), + _js=f"switch_to_{name.replace(' ', '_')}", inputs=[], outputs=[], ) @@ -1462,18 +1462,18 @@ def create_ui(): elif t == bool: comp = gr.Checkbox else: - raise Exception(f'bad options item type: {str(t)} for key {key}') + raise Exception(f'bad options item type: {t} for key {key}') - elem_id = "setting_"+key + elem_id = f"setting_{key}" if info.refresh is not None: if is_quicksettings: res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) - create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) + create_refresh_button(res, info.refresh, info.component_args, f"refresh_{key}") else: with FormRow(): res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) - create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) + create_refresh_button(res, info.refresh, info.component_args, f"refresh_{key}") else: res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) @@ -1545,7 +1545,7 @@ def create_ui(): current_tab.__exit__() gr.Group() - current_tab = gr.TabItem(elem_id="settings_{}".format(elem_id), label=text) + current_tab = gr.TabItem(elem_id=f"settings_{elem_id}", label=text) current_tab.__enter__() current_row = gr.Column(variant='compact') current_row.__enter__() @@ -1664,7 +1664,7 @@ def create_ui(): for interface, label, ifid in interfaces: if label in shared.opts.hidden_tabs: continue - with gr.TabItem(label, id=ifid, elem_id='tab_' + ifid): + with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): interface.render() if os.path.exists(os.path.join(script_path, "notification.mp3")): @@ -1771,10 +1771,10 @@ def create_ui(): def loadsave(path, x): def apply_field(obj, field, condition=None, init_field=None): - key = path + "/" + field + key = f"{path}/{field}" if getattr(obj, 'custom_script_source', None) is not None: - key = 'customscript/' + obj.custom_script_source + '/' + key + key = f"customscript/{obj.custom_script_source}/{key}" if getattr(obj, 'do_not_save_to_config', False): return diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 99ac8756..d9faf85a 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -61,7 +61,8 @@ def save_config_state(name): if not name: name = "Config" current_config_state["name"] = name - filename = os.path.join(config_states_dir, datetime.now().strftime("%Y_%m_%d-%H_%M_%S") + "_" + name + ".json") + timestamp = datetime.now().strftime('%Y_%m_%d-%H_%M_%S') + filename = 
os.path.join(config_states_dir, f"{timestamp}_{name}.json") print(f"Saving backup of webui/extension state to {filename}.") with open(filename, "w", encoding="utf-8") as f: json.dump(current_config_state, f) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 86c05a55..8c3dea56 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -69,7 +69,9 @@ class ExtraNetworksPage: pass def link_preview(self, filename): - return "./sd_extra_networks/thumb?filename=" + urllib.parse.quote(filename.replace('\\', '/')) + "&mtime=" + str(os.path.getmtime(filename)) + quoted_filename = urllib.parse.quote(filename.replace('\\', '/')) + mtime = os.path.getmtime(filename) + return f"./sd_extra_networks/thumb?filename={quoted_filename}&mtime={mtime}" def search_terms_from_path(self, filename, possible_directories=None): abspath = os.path.abspath(filename) -- cgit v1.2.1 From d50b95b5a32b917ded123891dce3c8a018fca064 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 07:14:13 +0300 Subject: fix an issue preventing the program from starting if the user specifies a bad gradio theme --- modules/shared.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 090707ca..4631965b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -672,8 +672,8 @@ def reload_gradio_theme(theme_name=None): else: try: gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) - except requests.exceptions.ConnectionError: - print("Can't access HuggingFace Hub, falling back to default Gradio theme") + except Exception as e: + errors.display(e, "changing gradio theme") gradio_theme = gr.themes.Default() -- cgit v1.2.1 From 762265eab58cdb8f2d6398769bab43d8b8db0075 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 07:52:45 +0300 Subject: autofixes from ruff --- modules/api/api.py | 14 +++++++------- modules/extras.py | 4 ++-- modules/images.py | 4 ++-- modules/img2img.py | 2 +- modules/prompt_parser.py | 2 +- modules/realesrgan_model.py | 2 +- modules/sd_disable_initialization.py | 2 +- modules/sd_hijack.py | 4 ++-- modules/sd_hijack_ip2p.py | 2 +- modules/sd_hijack_optimizations.py | 1 - modules/sd_models.py | 6 +++--- modules/textual_inversion/textual_inversion.py | 2 +- modules/ui.py | 13 ++++++------- modules/ui_extensions.py | 2 +- modules/ui_extra_networks.py | 2 +- 15 files changed, 30 insertions(+), 32 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 9bb95dfd..d47c39fc 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -60,7 +60,7 @@ def decode_base64_to_image(encoding): try: image = Image.open(BytesIO(base64.b64decode(encoding))) return image - except Exception as err: + except Exception: raise HTTPException(status_code=500, detail="Invalid encoded image") def encode_pil_to_base64(image): @@ -264,11 +264,11 @@ class Api: if request.alwayson_scripts and (len(request.alwayson_scripts) > 0): for alwayson_script_name in request.alwayson_scripts.keys(): alwayson_script = self.get_script(alwayson_script_name, script_runner) - if alwayson_script == None: + if alwayson_script is None: raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found") # Selectable script in always on script param check - if alwayson_script.alwayson == False: - raise HTTPException(status_code=422, detail=f"Cannot have a selectable script in the always on scripts 
params") + if alwayson_script.alwayson is False: + raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params") # always on script with no arg should always run so you don't really need to add them to the requests if "args" in request.alwayson_scripts[alwayson_script_name]: # min between arg length in scriptrunner and arg length in the request @@ -310,7 +310,7 @@ class Api: p.outpath_samples = opts.outdir_txt2img_samples shared.state.begin() - if selectable_scripts != None: + if selectable_scripts is not None: p.script_args = script_args processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here else: @@ -367,7 +367,7 @@ class Api: p.outpath_samples = opts.outdir_img2img_samples shared.state.begin() - if selectable_scripts != None: + if selectable_scripts is not None: p.script_args = script_args processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here else: @@ -642,7 +642,7 @@ class Api: sd_hijack.apply_optimizations() shared.state.end() return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") - except AssertionError as msg: + except AssertionError: shared.state.end() return TrainResponse(info=f"train embedding error: {error}") diff --git a/modules/extras.py b/modules/extras.py index ff4e9c4e..eb4f0b42 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -136,14 +136,14 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ result_is_instruct_pix2pix_model = False if theta_func2: - shared.state.textinfo = f"Loading B" + shared.state.textinfo = "Loading B" print(f"Loading {secondary_model_info.filename}...") theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu') else: theta_1 = None if theta_func1: - shared.state.textinfo = f"Loading C" + shared.state.textinfo = "Loading C" print(f"Loading {tertiary_model_info.filename}...") theta_2 = sd_models.read_state_dict(tertiary_model_info.filename, map_location='cpu') diff --git a/modules/images.py b/modules/images.py index a41965ab..3d5d76cc 100644 --- a/modules/images.py +++ b/modules/images.py @@ -409,13 +409,13 @@ class FilenameGenerator: time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format try: time_zone = pytz.timezone(args[1]) if len(args) > 1 else None - except pytz.exceptions.UnknownTimeZoneError as _: + except pytz.exceptions.UnknownTimeZoneError: time_zone = None time_zone_time = time_datetime.astimezone(time_zone) try: formatted_time = time_zone_time.strftime(time_format) - except (ValueError, TypeError) as _: + except (ValueError, TypeError): formatted_time = time_zone_time.strftime(self.default_time_format) return sanitize_filename_part(formatted_time, replace_spaces=False) diff --git a/modules/img2img.py b/modules/img2img.py index 9fc3a698..cdae301a 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -59,7 +59,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): # try to find corresponding mask for an image using simple filename matching mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image)) # if not found use first one ("same mask for all images" use-case) - if not mask_image_path in inpaint_masks: + if mask_image_path not in inpaint_masks: mask_image_path = inpaint_masks[0] mask_image = Image.open(mask_image_path) p.image_mask = mask_image diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 69665372..e084e948 100644 
--- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -92,7 +92,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): def get_schedule(prompt): try: tree = schedule_parser.parse(prompt) - except lark.exceptions.LarkError as e: + except lark.exceptions.LarkError: if 0: import traceback traceback.print_exc() diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index efd7fca5..9ec1adf2 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -134,6 +134,6 @@ def get_realesrgan_models(scaler): ), ] return models - except Exception as e: + except Exception: print("Error making Real-ESRGAN models list:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py index c4a09d15..9fc89dc6 100644 --- a/modules/sd_disable_initialization.py +++ b/modules/sd_disable_initialization.py @@ -61,7 +61,7 @@ class DisableInitialization: if res is None: res = original(url, *args, local_files_only=False, **kwargs) return res - except Exception as e: + except Exception: return original(url, *args, local_files_only=False, **kwargs) def transformers_utils_hub_get_from_cache(url, *args, local_files_only=False, **kwargs): diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index f4bb0266..d8135211 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -118,7 +118,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs): try: #Delete temporary weights if appended del sd_model._custom_loss_weight - except AttributeError as e: + except AttributeError: pass #If we have an old loss function, reset the loss function to the original one @@ -133,7 +133,7 @@ def apply_weighted_forward(sd_model): def undo_weighted_forward(sd_model): try: del sd_model.weighted_forward - except AttributeError as e: + except AttributeError: pass diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py index 3c727d3b..41ed54a2 100644 --- a/modules/sd_hijack_ip2p.py +++ b/modules/sd_hijack_ip2p.py @@ -10,4 +10,4 @@ def should_hijack_ip2p(checkpoint_info): ckpt_basename = os.path.basename(checkpoint_info.filename).lower() cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower() - return "pix2pix" in ckpt_basename and not "pix2pix" in cfg_basename + return "pix2pix" in ckpt_basename and "pix2pix" not in cfg_basename diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index f10865cd..b623d53d 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -296,7 +296,6 @@ def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_ if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes: # the big matmul fits into our memory limit; do everything in 1 chunk, # i.e. 
send it down the unchunked fast-path - query_chunk_size = q_tokens kv_chunk_size = k_tokens with devices.without_autocast(disable=q.dtype == v.dtype): diff --git a/modules/sd_models.py b/modules/sd_models.py index 36f643e1..11c1a344 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -239,7 +239,7 @@ def read_metadata_from_safetensors(filename): if isinstance(v, str) and v[0:1] == '{': try: res[k] = json.loads(v) - except Exception as e: + except Exception: pass return res @@ -467,7 +467,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None): try: with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd): sd_model = instantiate_from_config(sd_config.model) - except Exception as e: + except Exception: pass if sd_model is None: @@ -544,7 +544,7 @@ def reload_model_weights(sd_model=None, info=None): try: load_model_weights(sd_model, checkpoint_info, state_dict, timer) - except Exception as e: + except Exception: print("Failed to load checkpoint, restoring previous") load_model_weights(sd_model, current_checkpoint_info, None, timer) raise diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 4368eb63..f753b75f 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -603,7 +603,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st try: vectorSize = list(data['string_to_param'].values())[0].shape[0] - except Exception as e: + except Exception: vectorSize = '?' checkpoint = sd_models.select_checkpoint() diff --git a/modules/ui.py b/modules/ui.py index d02f6e82..2171f3aa 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -246,7 +246,7 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: all_seeds = gen_info.get('all_seeds', [-1]) res = all_seeds[index if 0 <= index < len(all_seeds) else 0] - except json.decoder.JSONDecodeError as e: + except json.decoder.JSONDecodeError: if gen_info_string != '': print("Error parsing JSON generation info:", file=sys.stderr) print(gen_info_string, file=sys.stderr) @@ -736,8 +736,8 @@ def create_ui(): with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
 gr.HTML(
-    f"<p style='padding-bottom: 1em;'>Process images in a directory on the same machine where the server is running." +
-    f"<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+    "<p style='padding-bottom: 1em;'>Process images in a directory on the same machine where the server is running." +
+    "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
     f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
     f"{hidden}</p>
" ) @@ -746,7 +746,6 @@ def create_ui(): img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch] - img2img_image_inputs = [init_img, sketch, init_img_with_mask, inpaint_color_sketch] for i, tab in enumerate(img2img_tabs): tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab]) @@ -1290,8 +1289,8 @@ def create_ui(): with gr.Column(elem_id='ti_gallery_container'): ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) - ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) - ti_progress = gr.HTML(elem_id="ti_progress", value="") + gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4) + gr.HTML(elem_id="ti_progress", value="") ti_outcome = gr.HTML(elem_id="ti_error", value="") create_embedding.click( @@ -1668,7 +1667,7 @@ def create_ui(): interface.render() if os.path.exists(os.path.join(script_path, "notification.mp3")): - audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) + gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) footer = shared.html("footer.html") footer = footer.format(versions=versions_html()) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index d9faf85a..ed70abe5 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -490,7 +490,7 @@ def create_ui(): config_states.list_config_states() with gr.Blocks(analytics_enabled=False) as ui: - with gr.Tabs(elem_id="tabs_extensions") as tabs: + with gr.Tabs(elem_id="tabs_extensions"): with gr.TabItem("Installed", id="installed"): with gr.Row(elem_id="extensions_installed_top"): diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 8c3dea56..49e06289 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -263,7 +263,7 @@ def create_ui(container, button, tabname): ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy()) ui.tabname = tabname - with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs: + with gr.Tabs(elem_id=tabname+"_extra_tabs"): for page in ui.stored_extra_pages: page_id = page.title.lower().replace(" ", "_") -- cgit v1.2.1 From 96d6ca4199e7c5eee8d451618de5161cea317c40 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 08:25:25 +0300 Subject: manual fixes for ruff --- modules/api/api.py | 129 +++++++++++++++-------------- modules/api/models.py | 5 +- modules/codeformer/codeformer_arch.py | 2 +- modules/esrgan_model_arch.py | 2 + modules/extra_networks_hypernet.py | 2 +- modules/images.py | 4 +- modules/img2img.py | 1 - modules/interrogate.py | 1 - modules/modelloader.py | 6 +- modules/models/diffusion/ddpm_edit.py | 26 +++--- modules/models/diffusion/uni_pc/sampler.py | 3 +- modules/processing.py | 2 +- modules/prompt_parser.py | 11 ++- modules/textual_inversion/autocrop.py | 2 +- modules/ui.py | 8 +- modules/upscaler.py | 2 +- 16 files changed, 101 insertions(+), 105 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index d47c39fc..f52d371b 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -15,7 +15,8 @@ from secrets import compare_digest 
import modules.shared as shared from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing -from modules.api.models import * +from modules.api import models +from modules.shared import opts from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.textual_inversion.textual_inversion import create_embedding, train_embedding from modules.textual_inversion.preprocess import preprocess @@ -25,20 +26,21 @@ from modules.sd_models import checkpoints_list, unload_model_weights, reload_mod from modules.sd_models_config import find_checkpoint_config_near_filename from modules.realesrgan_model import get_realesrgan_models from modules import devices -from typing import List +from typing import Dict, List, Any import piexif import piexif.helper + def upscaler_to_index(name: str): try: return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) - except: - raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}") + except Exception: + raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in shared.sd_upscalers])}") def script_name_to_index(name, scripts): try: return [script.title().lower() for script in scripts].index(name.lower()) - except: + except Exception: raise HTTPException(status_code=422, detail=f"Script '{name}' not found") def validate_sampler_name(name): @@ -99,7 +101,7 @@ def api_middleware(app: FastAPI): import starlette # importing just so it can be placed on silent list from rich.console import Console console = Console() - except: + except Exception: import traceback rich_available = False @@ -166,36 +168,36 @@ class Api: self.app = app self.queue_lock = queue_lock api_middleware(self.app) - self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) - self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) - self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) - self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) - self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse) - self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse) + self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=models.TextToImageResponse) + self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=models.ImageToImageResponse) + self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse) + self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse) + self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse) + self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=models.ProgressResponse) self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"]) self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"]) 
self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"]) - self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel) + self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel) self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"]) - self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel) - self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem]) - self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem]) - self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem]) - self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem]) - self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem]) - self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem]) - self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem]) - self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=EmbeddingsResponse) + self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel) + self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem]) + self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem]) + self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem]) + self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem]) + self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem]) + self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem]) + self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem]) + self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse) self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"]) - self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse) - self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse) - self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=PreprocessResponse) - self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse) - self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse) - self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse) + self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse) + 
self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse) + self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse) + self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse) + self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse) + self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse) self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"]) self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"]) - self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=ScriptsList) + self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList) self.default_script_arg_txt2img = [] self.default_script_arg_img2img = [] @@ -224,7 +226,7 @@ class Api: t2ilist = [str(title.lower()) for title in scripts.scripts_txt2img.titles] i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles] - return ScriptsList(txt2img = t2ilist, img2img = i2ilist) + return models.ScriptsList(txt2img=t2ilist, img2img=i2ilist) def get_script(self, script_name, script_runner): if script_name is None or script_name == "": @@ -276,7 +278,7 @@ class Api: script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx] return script_args - def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): + def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI): script_runner = scripts.scripts_txt2img if not script_runner.scripts: script_runner.initialize_scripts(False) @@ -320,9 +322,9 @@ class Api: b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else [] - return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js()) + return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js()) - def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI): + def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI): init_images = img2imgreq.init_images if init_images is None: raise HTTPException(status_code=404, detail="Init image not found") @@ -381,9 +383,9 @@ class Api: img2imgreq.init_images = None img2imgreq.mask = None - return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js()) + return models.ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js()) - def extras_single_image_api(self, req: ExtrasSingleImageRequest): + def extras_single_image_api(self, req: models.ExtrasSingleImageRequest): reqDict = setUpscalers(req) reqDict['image'] = decode_base64_to_image(reqDict['image']) @@ -391,9 +393,9 @@ class Api: with self.queue_lock: result = postprocessing.run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict) - return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1]) + return models.ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1]) - def extras_batch_images_api(self, req: ExtrasBatchImagesRequest): + def extras_batch_images_api(self, req: 
models.ExtrasBatchImagesRequest): reqDict = setUpscalers(req) image_list = reqDict.pop('imageList', []) @@ -402,15 +404,15 @@ class Api: with self.queue_lock: result = postprocessing.run_extras(extras_mode=1, image_folder=image_folder, image="", input_dir="", output_dir="", save_output=False, **reqDict) - return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) + return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) - def pnginfoapi(self, req: PNGInfoRequest): + def pnginfoapi(self, req: models.PNGInfoRequest): if(not req.image.strip()): - return PNGInfoResponse(info="") + return models.PNGInfoResponse(info="") image = decode_base64_to_image(req.image.strip()) if image is None: - return PNGInfoResponse(info="") + return models.PNGInfoResponse(info="") geninfo, items = images.read_info_from_image(image) if geninfo is None: @@ -418,13 +420,13 @@ class Api: items = {**{'parameters': geninfo}, **items} - return PNGInfoResponse(info=geninfo, items=items) + return models.PNGInfoResponse(info=geninfo, items=items) - def progressapi(self, req: ProgressRequest = Depends()): + def progressapi(self, req: models.ProgressRequest = Depends()): # copy from check_progress_call of ui.py if shared.state.job_count == 0: - return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo) + return models.ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo) # avoid dividing zero progress = 0.01 @@ -446,9 +448,9 @@ class Api: if shared.state.current_image and not req.skip_current_image: current_image = encode_pil_to_base64(shared.state.current_image) - return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo) + return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo) - def interrogateapi(self, interrogatereq: InterrogateRequest): + def interrogateapi(self, interrogatereq: models.InterrogateRequest): image_b64 = interrogatereq.image if image_b64 is None: raise HTTPException(status_code=404, detail="Image not found") @@ -465,7 +467,7 @@ class Api: else: raise HTTPException(status_code=404, detail="Model not found") - return InterrogateResponse(caption=processed) + return models.InterrogateResponse(caption=processed) def interruptapi(self): shared.state.interrupt() @@ -570,36 +572,36 @@ class Api: filename = create_embedding(**args) # create empty embedding sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used shared.state.end() - return CreateResponse(info=f"create embedding filename: {filename}") + return models.CreateResponse(info=f"create embedding filename: {filename}") except AssertionError as e: shared.state.end() - return TrainResponse(info=f"create embedding error: {e}") + return models.TrainResponse(info=f"create embedding error: {e}") def create_hypernetwork(self, args: dict): try: shared.state.begin() filename = create_hypernetwork(**args) # create empty embedding shared.state.end() - return CreateResponse(info=f"create hypernetwork filename: {filename}") + return models.CreateResponse(info=f"create hypernetwork filename: {filename}") except AssertionError as e: shared.state.end() - return TrainResponse(info=f"create hypernetwork 
error: {e}") + return models.TrainResponse(info=f"create hypernetwork error: {e}") def preprocess(self, args: dict): try: shared.state.begin() preprocess(**args) # quick operation unless blip/booru interrogation is enabled shared.state.end() - return PreprocessResponse(info = 'preprocess complete') + return models.PreprocessResponse(info = 'preprocess complete') except KeyError as e: shared.state.end() - return PreprocessResponse(info=f"preprocess error: invalid token: {e}") + return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}") except AssertionError as e: shared.state.end() - return PreprocessResponse(info=f"preprocess error: {e}") + return models.PreprocessResponse(info=f"preprocess error: {e}") except FileNotFoundError as e: shared.state.end() - return PreprocessResponse(info=f'preprocess error: {e}') + return models.PreprocessResponse(info=f'preprocess error: {e}') def train_embedding(self, args: dict): try: @@ -617,10 +619,10 @@ class Api: if not apply_optimizations: sd_hijack.apply_optimizations() shared.state.end() - return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") + return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") except AssertionError as msg: shared.state.end() - return TrainResponse(info=f"train embedding error: {msg}") + return models.TrainResponse(info=f"train embedding error: {msg}") def train_hypernetwork(self, args: dict): try: @@ -641,14 +643,15 @@ class Api: if not apply_optimizations: sd_hijack.apply_optimizations() shared.state.end() - return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") + return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") except AssertionError: shared.state.end() - return TrainResponse(info=f"train embedding error: {error}") + return models.TrainResponse(info=f"train embedding error: {error}") def get_memory(self): try: - import os, psutil + import os + import psutil process = psutil.Process(os.getpid()) res = process.memory_info() # only rss is cross-platform guaranteed so we dont rely on other values ram_total = 100 * res.rss / process.memory_percent() # and total memory is calculated as actual value is not cross-platform safe @@ -675,10 +678,10 @@ class Api: 'events': warnings, } else: - cuda = { 'error': 'unavailable' } + cuda = {'error': 'unavailable'} except Exception as err: - cuda = { 'error': f'{err}' } - return MemoryResponse(ram = ram, cuda = cuda) + cuda = {'error': f'{err}'} + return models.MemoryResponse(ram=ram, cuda=cuda) def launch(self, server_name, port): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index 4a70f440..4d291076 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -223,8 +223,9 @@ for key in _options: if(_options[key].dest != 'help'): flag = _options[key] _type = str - if _options[key].default is not None: _type = type(_options[key].default) - flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))}) + if _options[key].default is not None: + _type = type(_options[key].default) + flags.update({flag.dest: (_type, Field(default=flag.default, description=flag.help))}) FlagsModel = create_model("Flags", **flags) diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index 11dcc3ee..f1a7cf09 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -7,7 +7,7 @@ 
from torch import nn, Tensor import torch.nn.functional as F from typing import Optional, List -from modules.codeformer.vqgan_arch import * +from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 6071fea7..7f8bc7c0 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -438,9 +438,11 @@ def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias= padding = padding if pad_type == 'zero' else 0 if convtype=='PartialConv2D': + from torchvision.ops import PartialConv2d # this is definitely not going to work, but PartialConv2d doesn't work anyway and this shuts up static analyzer c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) elif convtype=='DeformConv2D': + from torchvision.ops import DeformConv2d # not tested c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) elif convtype=='Conv3D': diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py index 04f27c9f..aa2a14ef 100644 --- a/modules/extra_networks_hypernet.py +++ b/modules/extra_networks_hypernet.py @@ -1,4 +1,4 @@ -from modules import extra_networks, shared, extra_networks +from modules import extra_networks, shared from modules.hypernetworks import hypernetwork diff --git a/modules/images.py b/modules/images.py index 3d5d76cc..5eb6d855 100644 --- a/modules/images.py +++ b/modules/images.py @@ -472,9 +472,9 @@ def get_next_sequence_number(path, basename): prefix_length = len(basename) for p in os.listdir(path): if p.startswith(basename): - l = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element) + parts = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element) try: - result = max(int(l[0]), result) + result = max(int(parts[0]), result) except ValueError: pass diff --git a/modules/img2img.py b/modules/img2img.py index cdae301a..32b1ecd6 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -13,7 +13,6 @@ from modules.shared import opts, state import modules.shared as shared import modules.processing as processing from modules.ui import plaintext_to_html -import modules.images as images import modules.scripts diff --git a/modules/interrogate.py b/modules/interrogate.py index 9f7d657f..22df9216 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -11,7 +11,6 @@ import torch.hub from torchvision import transforms from torchvision.transforms.functional import InterpolationMode -import modules.shared as shared from modules import devices, paths, shared, lowvram, modelloader, errors blip_image_eval_size = 384 diff --git a/modules/modelloader.py b/modules/modelloader.py index cb85ac4f..cf685000 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -108,12 +108,12 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None): print(f"Moving {file} from {src_path} to {dest_path}.") try: shutil.move(fullpath, dest_path) - except: + except Exception: pass if len(os.listdir(src_path)) == 0: print(f"Removing empty folder: {src_path}") shutil.rmtree(src_path, 
True) - except: + except Exception: pass @@ -141,7 +141,7 @@ def load_upscalers(): full_model = f"modules.{model_name}_model" try: importlib.import_module(full_model) - except: + except Exception: pass datas = [] diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index f880bc3c..611c2b69 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -479,7 +479,7 @@ class LatentDiffusion(DDPM): self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: + except Exception: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor @@ -891,16 +891,6 @@ class LatentDiffusion(DDPM): c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): @@ -1171,8 +1161,10 @@ class LatentDiffusion(DDPM): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) return img, intermediates @torch.no_grad() @@ -1219,8 +1211,10 @@ class LatentDiffusion(DDPM): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) if return_intermediates: return img, intermediates @@ -1337,7 +1331,7 @@ class LatentDiffusion(DDPM): if inpaint: # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] + h, w = z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. 
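The fixes in these patches repeat a handful of ruff findings: comparisons against None/False with ==/!= (E711/E712), bare excepts (E722), f-strings without placeholders (F541), and exception bindings that are never used (F841). A minimal, self-contained before/after sketch of those patterns — hypothetical code written for illustration, not taken from the repository:

def classify(raw):
    # E722: name the exception class instead of using a bare `except:`.
    try:
        value = int(raw)
    except Exception:        # was: `except:` (or `except ValueError as e:` with `e` unused, F841)
        value = None

    # E711: compare against None with `is` / `is not`, never `==` / `!=`.
    if value is not None:    # was: `if value != None:`
        return value

    # F541: a string without placeholders does not need the f-prefix.
    return "unparsed"        # was: f"unparsed"

print(classify("42"), classify("oops"))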
diff --git a/modules/models/diffusion/uni_pc/sampler.py b/modules/models/diffusion/uni_pc/sampler.py index a241c8a7..0a9defa1 100644 --- a/modules/models/diffusion/uni_pc/sampler.py +++ b/modules/models/diffusion/uni_pc/sampler.py @@ -54,7 +54,8 @@ class UniPCSampler(object): if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): ctmp = ctmp[0] + while isinstance(ctmp, list): + ctmp = ctmp[0] cbs = ctmp.shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") diff --git a/modules/processing.py b/modules/processing.py index 1a76e552..6f5233c1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -664,7 +664,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if not shared.opts.dont_fix_second_order_samplers_schedule: try: step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 - except: + except Exception: pass uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index e084e948..3a720721 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -54,18 +54,21 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): """ def collect_steps(steps, tree): - l = [steps] + res = [steps] + class CollectSteps(lark.Visitor): def scheduled(self, tree): tree.children[-1] = float(tree.children[-1]) if tree.children[-1] < 1: tree.children[-1] *= steps tree.children[-1] = min(steps, int(tree.children[-1])) - l.append(tree.children[-1]) + res.append(tree.children[-1]) + def alternate(self, tree): - l.extend(range(1, steps+1)) + res.extend(range(1, steps+1)) + CollectSteps().visit(tree) - return sorted(set(l)) + return sorted(set(res)) def at_step(step, tree): class AtStep(lark.Transformer): diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index ba1bdcd4..d7d8d2e3 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -185,7 +185,7 @@ def image_face_points(im, settings): try: faces = classifier.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE) - except: + except Exception: continue if len(faces) > 0: diff --git a/modules/ui.py b/modules/ui.py index 2171f3aa..6beda76f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1,15 +1,9 @@ -import html import json -import math import mimetypes import os -import platform -import random import sys -import tempfile -import time import traceback -from functools import partial, reduce +from functools import reduce import warnings import gradio as gr diff --git a/modules/upscaler.py b/modules/upscaler.py index e2eaa730..0ad4fe99 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -45,7 +45,7 @@ class Upscaler: try: import cv2 self.can_tile = True - except: + except Exception: pass @abstractmethod -- cgit v1.2.1 From f741a98baccae100fcfb40c017b5c35c5cba1b0c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 08:43:42 +0300 Subject: imports cleanup for ruff --- modules/codeformer/codeformer_arch.py | 4 
+--- modules/codeformer/vqgan_arch.py | 2 -- modules/codeformer_model.py | 4 +--- modules/config_states.py | 2 +- modules/esrgan_model.py | 2 +- modules/esrgan_model_arch.py | 1 - modules/extensions.py | 1 - modules/generation_parameters_copypaste.py | 4 ---- modules/hypernetworks/hypernetwork.py | 3 +-- modules/hypernetworks/ui.py | 2 -- modules/images.py | 2 +- modules/img2img.py | 5 +---- modules/mac_specific.py | 1 - modules/modelloader.py | 1 - modules/models/diffusion/uni_pc/uni_pc.py | 1 - modules/processing.py | 5 ++--- modules/sd_hijack.py | 2 +- modules/sd_hijack_inpainting.py | 6 ------ modules/sd_hijack_ip2p.py | 5 +---- modules/sd_hijack_xlmr.py | 2 -- modules/sd_models.py | 2 +- modules/sd_models_config.py | 1 - modules/sd_samplers_kdiffusion.py | 1 - modules/sd_vae.py | 3 --- modules/shared.py | 3 --- modules/styles.py | 9 --------- modules/textual_inversion/autocrop.py | 4 +--- modules/textual_inversion/image_embedding.py | 2 +- modules/textual_inversion/preprocess.py | 4 ---- modules/textual_inversion/textual_inversion.py | 1 - modules/txt2img.py | 9 +++------ modules/ui.py | 5 ++--- modules/ui_extra_networks.py | 1 - modules/ui_postprocessing.py | 2 +- modules/upscaler.py | 2 -- modules/xlmr.py | 2 +- 36 files changed, 21 insertions(+), 85 deletions(-) (limited to 'modules') diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index f1a7cf09..00c407de 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -1,14 +1,12 @@ # this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py import math -import numpy as np import torch from torch import nn, Tensor import torch.nn.functional as F -from typing import Optional, List +from typing import Optional from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock -from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY def calc_mean_std(feat, eps=1e-5): diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py index e7293683..820e6b12 100644 --- a/modules/codeformer/vqgan_arch.py +++ b/modules/codeformer/vqgan_arch.py @@ -5,11 +5,9 @@ VQGAN code, adapted from the original created by the Unleashing Transformers aut https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py ''' -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F -import copy from basicsr.utils import get_root_logger from basicsr.utils.registry import ARCH_REGISTRY diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 8d84bbc9..8e56cb89 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -33,11 +33,9 @@ def setup_model(dirname): try: from torchvision.transforms.functional import normalize from modules.codeformer.codeformer_arch import CodeFormer - from basicsr.utils.download_util import load_file_from_url - from basicsr.utils import imwrite, img2tensor, tensor2img + from basicsr.utils import img2tensor, tensor2img from facelib.utils.face_restoration_helper import FaceRestoreHelper from facelib.detection.retinaface import retinaface - from modules.shared import cmd_opts net_class = CodeFormer diff --git a/modules/config_states.py b/modules/config_states.py index 2ea00929..8f1ff428 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -14,7 +14,7 @@ from collections import OrderedDict import git from modules import shared, extensions -from modules.paths_internal import 
extensions_dir, extensions_builtin_dir, script_path, config_states_dir +from modules.paths_internal import script_path, config_states_dir all_config_states = OrderedDict() diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index f4369257..85aa6934 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -6,7 +6,7 @@ from PIL import Image from basicsr.utils.download_util import load_file_from_url import modules.esrgan_model_arch as arch -from modules import shared, modelloader, images, devices +from modules import modelloader, images, devices from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 7f8bc7c0..4de9dd8d 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -2,7 +2,6 @@ from collections import OrderedDict import math -import functools import torch import torch.nn as nn import torch.nn.functional as F diff --git a/modules/extensions.py b/modules/extensions.py index 34d9d654..829f8cd9 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -3,7 +3,6 @@ import sys import traceback import time -from datetime import datetime import git from modules import shared diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fe8b18b2..f1c59c46 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,15 +1,11 @@ import base64 -import html import io -import math import os import re -from pathlib import Path import gradio as gr from modules.paths import data_path from modules import shared, ui_tempdir, script_callbacks -import tempfile from PIL import Image re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 1fc49537..9fe749b7 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -1,4 +1,3 @@ -import csv import datetime import glob import html @@ -18,7 +17,7 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ -from collections import defaultdict, deque +from collections import deque from statistics import stdev, mean diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index 76599f5a..be168736 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -1,6 +1,4 @@ import html -import os -import re import gradio as gr import modules.hypernetworks.hypernetwork diff --git a/modules/images.py b/modules/images.py index 5eb6d855..7392cb8b 100644 --- a/modules/images.py +++ b/modules/images.py @@ -19,7 +19,7 @@ import json import hashlib from modules import sd_samplers, shared, script_callbacks, errors -from modules.shared import opts, cmd_opts +from modules.shared import opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) diff --git a/modules/img2img.py b/modules/img2img.py index 32b1ecd6..d704bf90 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -1,12 +1,9 @@ -import math import os -import sys -import traceback import numpy as np from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError -from modules import devices, sd_samplers +from modules import sd_samplers from 
modules.generation_parameters_copypaste import create_override_settings_dict from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, state diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 40ce2101..5c2f92a1 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -1,6 +1,5 @@ import torch import platform -from modules import paths from modules.sd_hijack_utils import CondFunc from packaging import version diff --git a/modules/modelloader.py b/modules/modelloader.py index cf685000..92ada694 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -1,4 +1,3 @@ -import glob import os import shutil import importlib diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index 11b330bc..a4c4ef4e 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -1,5 +1,4 @@ import torch -import torch.nn.functional as F import math from tqdm.auto import trange diff --git a/modules/processing.py b/modules/processing.py index 6f5233c1..c3932d6b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -2,7 +2,6 @@ import json import math import os import sys -import warnings import hashlib import torch @@ -11,10 +10,10 @@ from PIL import Image, ImageFilter, ImageOps import random import cv2 from skimage import exposure -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index d8135211..81573b78 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -3,7 +3,7 @@ from torch.nn.functional import silu from types import MethodType import modules.textual_inversion.textual_inversion -from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint +from modules import devices, sd_hijack_optimizations, shared from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 55a2ce4d..344d75c8 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -1,15 +1,9 @@ -import os import torch -from einops import repeat -from omegaconf import ListConfig - import ldm.models.diffusion.ddpm import ldm.models.diffusion.ddim import ldm.models.diffusion.plms -from ldm.models.diffusion.ddpm import LatentDiffusion -from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.ddim import DDIMSampler, noise_like from ldm.models.diffusion.sampling_util import norm_thresholding diff --git a/modules/sd_hijack_ip2p.py b/modules/sd_hijack_ip2p.py index 41ed54a2..6fe6b6ff 100644 --- a/modules/sd_hijack_ip2p.py +++ b/modules/sd_hijack_ip2p.py @@ -1,8 +1,5 @@ -import collections import os.path -import sys -import gc -import time + def should_hijack_ip2p(checkpoint_info): from modules import sd_models_config diff --git 
a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py index 4ac51c38..28528329 100644 --- a/modules/sd_hijack_xlmr.py +++ b/modules/sd_hijack_xlmr.py @@ -1,8 +1,6 @@ -import open_clip.tokenizer import torch from modules import sd_hijack_clip, devices -from modules.shared import opts class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): diff --git a/modules/sd_models.py b/modules/sd_models.py index 11c1a344..1c09c709 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -565,7 +565,7 @@ def reload_model_weights(sd_model=None, info=None): def unload_model_weights(sd_model=None, info=None): - from modules import lowvram, devices, sd_hijack + from modules import devices, sd_hijack timer = Timer() if model_data.sd_model: diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index 7a79925a..9bfe1237 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -1,4 +1,3 @@ -import re import os import torch diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 0fc9f456..3b8e9622 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -1,7 +1,6 @@ from collections import deque import torch import inspect -import einops import k_diffusion.sampling from modules import prompt_parser, devices, sd_samplers_common diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 521e485a..b7176125 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -1,8 +1,5 @@ -import torch -import safetensors.torch import os import collections -from collections import namedtuple from modules import paths, shared, devices, script_callbacks, sd_models import glob from copy import deepcopy diff --git a/modules/shared.py b/modules/shared.py index 4631965b..44cd2c0c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -1,12 +1,9 @@ -import argparse import datetime import json import os import sys import time -import requests -from PIL import Image import gradio as gr import tqdm diff --git a/modules/styles.py b/modules/styles.py index 11642075..c22769cf 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -1,18 +1,9 @@ -# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime -from __future__ import annotations - import csv import os import os.path import typing -import collections.abc as abc -import tempfile import shutil -if typing.TYPE_CHECKING: - # Only import this when code is being type-checked, it doesn't have any effect at runtime - from .processing import StableDiffusionProcessing - class PromptStyle(typing.NamedTuple): name: str diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index d7d8d2e3..7770d22f 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -1,10 +1,8 @@ import cv2 import requests import os -from collections import defaultdict -from math import log, sqrt import numpy as np -from PIL import Image, ImageDraw +from PIL import ImageDraw GREEN = "#0F0" BLUE = "#00F" diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index 5593f88c..ee0e850a 100644 --- a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -2,7 +2,7 @@ import base64 import json import numpy as np import zlib -from PIL import Image, PngImagePlugin, ImageDraw, ImageFont +from PIL import Image, ImageDraw, ImageFont from fonts.ttf import Roboto import torch 
from modules.shared import opts diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index da0bcb26..d0cad09e 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -1,13 +1,9 @@ import os from PIL import Image, ImageOps import math -import platform -import sys import tqdm -import time from modules import paths, shared, images, deepbooru -from modules.shared import opts, cmd_opts from modules.textual_inversion import autocrop diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index f753b75f..9ed9ba45 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -1,7 +1,6 @@ import os import sys import traceback -import inspect from collections import namedtuple import torch diff --git a/modules/txt2img.py b/modules/txt2img.py index 16841d0f..f022381c 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -1,18 +1,15 @@ import modules.scripts -from modules import sd_samplers +from modules import sd_samplers, processing from modules.generation_parameters_copypaste import create_override_settings_dict -from modules.processing import StableDiffusionProcessing, Processed, StableDiffusionProcessingTxt2Img, \ - StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, cmd_opts import modules.shared as shared -import modules.processing as processing from modules.ui import plaintext_to_html def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args): override_settings = create_override_settings_dict(override_settings_texts) - p = StableDiffusionProcessingTxt2Img( + p = processing.StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids, @@ -53,7 +50,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step processed = modules.scripts.scripts_txt2img.run(p, *args) if processed is None: - processed = process_images(p) + processed = processing.process_images(p) p.close() diff --git a/modules/ui.py b/modules/ui.py index 6beda76f..f7e57593 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -14,10 +14,10 @@ from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress -from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path -from modules.shared import opts, cmd_opts, restricted_opts +from modules.shared import opts, cmd_opts import modules.codeformer_model import modules.generation_parameters_copypaste as parameters_copypaste @@ -28,7 +28,6 @@ import modules.shared as 
shared import modules.styles import modules.textual_inversion.ui from modules import prompt_parser -from modules.images import save_image from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img from modules.textual_inversion import textual_inversion diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 49e06289..800e467a 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -1,4 +1,3 @@ -import glob import os.path import urllib.parse from pathlib import Path diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index f25639e5..c7dc1154 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -1,5 +1,5 @@ import gradio as gr -from modules import scripts_postprocessing, scripts, shared, gfpgan_model, codeformer_model, ui_common, postprocessing, call_queue +from modules import scripts, shared, ui_common, postprocessing, call_queue import modules.generation_parameters_copypaste as parameters_copypaste diff --git a/modules/upscaler.py b/modules/upscaler.py index 0ad4fe99..777593b0 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -2,8 +2,6 @@ import os from abc import abstractmethod import PIL -import numpy as np -import torch from PIL import Image import modules.shared diff --git a/modules/xlmr.py b/modules/xlmr.py index beab3fdf..e056c3f6 100644 --- a/modules/xlmr.py +++ b/modules/xlmr.py @@ -1,4 +1,4 @@ -from transformers import BertPreTrainedModel,BertModel,BertConfig +from transformers import BertPreTrainedModel, BertConfig import torch.nn as nn import torch from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig -- cgit v1.2.1 From 4b854806d98cf5ccd48e5cd99c172613da7937f0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 09:02:23 +0300 Subject: F401 fixes for ruff --- modules/cmd_args.py | 2 +- modules/deepbooru.py | 1 - modules/extensions.py | 2 +- modules/gfpgan_model.py | 2 +- modules/models/diffusion/uni_pc/__init__.py | 2 +- modules/paths.py | 4 ++-- modules/realesrgan_model.py | 6 +++--- modules/script_loading.py | 1 - modules/sd_hijack_inpainting.py | 2 +- modules/sd_models.py | 4 +--- modules/sd_samplers.py | 2 +- modules/shared.py | 2 +- modules/ui.py | 4 ++-- modules/upscaler.py | 2 +- 14 files changed, 16 insertions(+), 20 deletions(-) (limited to 'modules') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index d906a571..e01ca655 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -1,6 +1,6 @@ import argparse import os -from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file +from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file # noqa: F401 parser = argparse.ArgumentParser() diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 122fce7f..1c4554a2 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -2,7 +2,6 @@ import os import re import torch -from PIL import Image import numpy as np from modules import modelloader, paths, deepbooru_model, devices, images, shared diff --git a/modules/extensions.py b/modules/extensions.py index 829f8cd9..bc2c0450 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -6,7 +6,7 @@ import time import git from modules import shared -from modules.paths_internal import extensions_dir, 
extensions_builtin_dir, script_path +from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 extensions = [] diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index fbe6215a..0131dea4 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -78,7 +78,7 @@ def setup_model(dirname): try: from gfpgan import GFPGANer - from facexlib import detection, parsing + from facexlib import detection, parsing # noqa: F401 global user_path global have_gfpgan global gfpgan_constructor diff --git a/modules/models/diffusion/uni_pc/__init__.py b/modules/models/diffusion/uni_pc/__init__.py index e1265e3f..dbb35964 100644 --- a/modules/models/diffusion/uni_pc/__init__.py +++ b/modules/models/diffusion/uni_pc/__init__.py @@ -1 +1 @@ -from .sampler import UniPCSampler +from .sampler import UniPCSampler # noqa: F401 diff --git a/modules/paths.py b/modules/paths.py index acf1894b..5f6474c0 100644 --- a/modules/paths.py +++ b/modules/paths.py @@ -1,8 +1,8 @@ import os import sys -from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir +from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir # noqa: F401 -import modules.safe +import modules.safe # noqa: F401 # data_path = cmd_opts_pre.data diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index 9ec1adf2..c24d8dbb 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -17,9 +17,9 @@ class UpscalerRealESRGAN(Upscaler): self.user_path = path super().__init__() try: - from basicsr.archs.rrdbnet_arch import RRDBNet - from realesrgan import RealESRGANer - from realesrgan.archs.srvgg_arch import SRVGGNetCompact + from basicsr.archs.rrdbnet_arch import RRDBNet # noqa: F401 + from realesrgan import RealESRGANer # noqa: F401 + from realesrgan.archs.srvgg_arch import SRVGGNetCompact # noqa: F401 self.enable = True self.scalers = [] scalers = self.load_models(path) diff --git a/modules/script_loading.py b/modules/script_loading.py index a7d2203f..57b15862 100644 --- a/modules/script_loading.py +++ b/modules/script_loading.py @@ -2,7 +2,6 @@ import os import sys import traceback import importlib.util -from types import ModuleType def load_module(path): diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 344d75c8..058575b7 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -4,7 +4,7 @@ import ldm.models.diffusion.ddpm import ldm.models.diffusion.ddim import ldm.models.diffusion.plms -from ldm.models.diffusion.ddim import DDIMSampler, noise_like +from ldm.models.diffusion.ddim import noise_like from ldm.models.diffusion.sampling_util import norm_thresholding diff --git a/modules/sd_models.py b/modules/sd_models.py index 1c09c709..d1e946a5 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -15,7 +15,6 @@ import ldm.modules.midas as midas from ldm.util import instantiate_from_config from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config -from modules.paths import models_path from modules.sd_hijack_inpainting import do_inpainting_hijack from modules.timer import Timer @@ -87,8 +86,7 @@ class CheckpointInfo: try: # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start. 
- - from transformers import logging, CLIPModel + from transformers import logging, CLIPModel # noqa: F401 logging.set_verbosity_error() except Exception: diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index ff361f22..4f1bf21d 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -1,7 +1,7 @@ from modules import sd_samplers_compvis, sd_samplers_kdiffusion, shared # imports for functions that previously were here and are used by other modules -from modules.sd_samplers_common import samples_to_image_grid, sample_to_image +from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401 all_samplers = [ *sd_samplers_kdiffusion.samplers_data_k_diffusion, diff --git a/modules/shared.py b/modules/shared.py index 44cd2c0c..7d70f041 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -12,7 +12,7 @@ import modules.memmon import modules.styles import modules.devices as devices from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args -from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir +from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401 from ldm.models.diffusion.ddpm import LatentDiffusion demo = None diff --git a/modules/ui.py b/modules/ui.py index f7e57593..782b569d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -10,10 +10,10 @@ import gradio as gr import gradio.routes import gradio.utils import numpy as np -from PIL import Image, PngImagePlugin +from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path diff --git a/modules/upscaler.py b/modules/upscaler.py index 777593b0..e145be30 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -41,7 +41,7 @@ class Upscaler: os.makedirs(self.model_path, exist_ok=True) try: - import cv2 + import cv2 # noqa: F401 self.can_tile = True except Exception: pass -- cgit v1.2.1 From 028d3f6425d85f122027c127fba8bcbf4f66ee75 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 11:05:02 +0300 Subject: ruff auto fixes --- modules/config_states.py | 2 +- modules/deepbooru.py | 2 +- modules/devices.py | 2 +- modules/hypernetworks/hypernetwork.py | 2 +- modules/hypernetworks/ui.py | 4 ++-- modules/interrogate.py | 2 +- modules/modelloader.py | 2 +- modules/models/diffusion/ddpm_edit.py | 4 ++-- modules/scripts_auto_postprocessing.py | 2 +- modules/sd_hijack.py | 2 +- modules/sd_hijack_optimizations.py | 14 +++++++------- modules/sd_samplers_compvis.py | 2 +- modules/sd_samplers_kdiffusion.py | 2 +- modules/shared.py | 6 +++--- modules/textual_inversion/textual_inversion.py | 2 +- modules/ui.py | 8 ++++---- modules/ui_extra_networks.py | 4 ++-- modules/ui_tempdir.py | 2 +- 18 files changed, 32 
insertions(+), 32 deletions(-) (limited to 'modules') diff --git a/modules/config_states.py b/modules/config_states.py index 8f1ff428..75da862a 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -35,7 +35,7 @@ def list_config_states(): j["filepath"] = path config_states.append(j) - config_states = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)) + config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True) for cs in config_states: timestamp = time.asctime(time.gmtime(cs["created_at"])) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 1c4554a2..547e1b4c 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -78,7 +78,7 @@ class DeepDanbooru: res = [] - filtertags = set([x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")]) + filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")} for tag in [x for x in tags if x not in filtertags]: probability = probability_dict[tag] diff --git a/modules/devices.py b/modules/devices.py index c705a3cb..d8a34a0f 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -65,7 +65,7 @@ def enable_tf32(): # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407 - if any([torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())]): + if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())): torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 9fe749b7..6ef0bfdf 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -403,7 +403,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None): k = self.to_k(context_k) v = self.to_v(context_v) - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) sim = einsum('b i d, b j d -> b i j', q, k) * self.scale diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index be168736..e3f9eb13 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -5,13 +5,13 @@ import modules.hypernetworks.hypernetwork from modules import devices, sd_hijack, shared not_available = ["hardswish", "multiheadattention"] -keys = list(x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available) +keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available] def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure) - return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {filename}", "" + return gr.Dropdown.update(choices=sorted(shared.hypernetworks.keys())), f"Created: {filename}", "" def train_hypernetwork(*args): diff --git a/modules/interrogate.py b/modules/interrogate.py index 
22df9216..a1c8e537 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -159,7 +159,7 @@ class InterrogateModels: text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)] top_count = min(top_count, len(text_array)) - text_tokens = clip.tokenize([text for text in text_array], truncate=True).to(devices.device_interrogate) + text_tokens = clip.tokenize(list(text_array), truncate=True).to(devices.device_interrogate) text_features = self.clip_model.encode_text(text_tokens).type(self.dtype) text_features /= text_features.norm(dim=-1, keepdim=True) diff --git a/modules/modelloader.py b/modules/modelloader.py index 92ada694..25612bf8 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -39,7 +39,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None if os.path.islink(full_path) and not os.path.exists(full_path): print(f"Skipping broken symlink: {full_path}") continue - if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]): + if ext_blacklist is not None and any(full_path.endswith(x) for x in ext_blacklist): continue if full_path not in output: output.append(full_path) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index 611c2b69..09432117 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -1130,7 +1130,7 @@ class LatentDiffusion(DDPM): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] @@ -1229,7 +1229,7 @@ class LatentDiffusion(DDPM): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, diff --git a/modules/scripts_auto_postprocessing.py b/modules/scripts_auto_postprocessing.py index 30d6d658..d63078de 100644 --- a/modules/scripts_auto_postprocessing.py +++ b/modules/scripts_auto_postprocessing.py @@ -17,7 +17,7 @@ class ScriptPostprocessingForMainUI(scripts.Script): return self.postprocessing_controls.values() def postprocess_image(self, p, script_pp, *args): - args_dict = {k: v for k, v in zip(self.postprocessing_controls, args)} + args_dict = dict(zip(self.postprocessing_controls, args)) pp = scripts_postprocessing.PostprocessedImage(script_pp.image) pp.info = {} diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 81573b78..e374aeb8 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -37,7 +37,7 @@ def apply_optimizations(): optimization_method = None - can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention")) # not everyone has torch 2.x to use sdp + can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) # not everyone has torch 2.x to use sdp if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) 
<= (9, 0)): print("Applying xformers cross attention optimization.") diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index b623d53d..a174bbe1 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -49,7 +49,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None): v_in = self.to_v(context_v) del context, context_k, context_v, x - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in dtype = q.dtype @@ -98,7 +98,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None): del context, x - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) @@ -229,7 +229,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None): with devices.without_autocast(disable=not shared.opts.upcast_attn): k = k * self.scale - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) r = einsum_op(q, k, v) r = r.to(dtype) return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h)) @@ -334,7 +334,7 @@ def xformers_attention_forward(self, x, context=None, mask=None): k_in = self.to_k(context_k) v_in = self.to_v(context_v) - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in)) + q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in dtype = q.dtype @@ -460,7 +460,7 @@ def xformers_attnblock_forward(self, x): k = self.k(h_) v = self.v(h_) b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) dtype = q.dtype if shared.opts.upcast_attn: q, k = q.float(), k.float() @@ -482,7 +482,7 @@ def sdp_attnblock_forward(self, x): k = self.k(h_) v = self.v(h_) b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) dtype = q.dtype if shared.opts.upcast_attn: q, k = q.float(), k.float() @@ -506,7 +506,7 @@ def sub_quad_attnblock_forward(self, x): k = self.k(h_) v = self.v(h_) b, c, h, w = q.shape - q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v)) q = q.contiguous() k = k.contiguous() v = v.contiguous() diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py index bfcc5574..7427648f 100644 --- a/modules/sd_samplers_compvis.py +++ b/modules/sd_samplers_compvis.py @@ -83,7 +83,7 @@ class VanillaStableDiffusionSampler: conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step) - assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers' + assert all(len(conds) == 1 for conds in conds_list), 'composition via AND is not supported for DDIM/PLMS samplers' cond = tensor # for DDIM, shapes must match, we can't just process cond and uncond independently; diff 
--git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 3b8e9622..2f733cf5 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -86,7 +86,7 @@ class CFGDenoiser(torch.nn.Module): conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - assert not is_edit_model or all([len(conds) == 1 for conds in conds_list]), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" + assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" batch_size = len(conds_list) repeats = [len(conds_list[i]) for i in range(batch_size)] diff --git a/modules/shared.py b/modules/shared.py index 7d70f041..e2691585 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -381,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"), "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"), "extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks), + "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(hypernetworks.keys())}, refresh=reload_hypernetworks), })) options_templates.update(options_section(('ui', "User interface"), { @@ -403,7 +403,7 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}), - "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": [x for x in tab_names]}), + "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), @@ -583,7 +583,7 @@ class Options: if item.section not in section_ids: section_ids[item.section] = len(section_ids) - self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])} + self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section])) def cast_value(self, key, value): """casts an arbitrary to the same type as this setting's value with key diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 9ed9ba45..c37bb2ad 100644 --- a/modules/textual_inversion/textual_inversion.py +++ 
b/modules/textual_inversion/textual_inversion.py @@ -167,7 +167,7 @@ class EmbeddingDatabase: if 'string_to_param' in data: param_dict = data['string_to_param'] if hasattr(param_dict, '_parameters'): - param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11 + param_dict = param_dict._parameters # fix for torch 1.12.1 loading saved file from torch 1.11 assert len(param_dict) == 1, 'embedding file has multiple terms in it' emb = next(iter(param_dict.items()))[1] # diffuser concepts diff --git a/modules/ui.py b/modules/ui.py index 782b569d..84d661b2 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1222,7 +1222,7 @@ def create_ui(): ) def get_textual_inversion_template_names(): - return sorted([x for x in textual_inversion.textual_inversion_templates]) + return sorted(textual_inversion.textual_inversion_templates) with gr.Tab(label="Train", id="train"): gr.HTML(value="

Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]

") @@ -1230,8 +1230,8 @@ def create_ui(): train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()]) - create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name") + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=list(shared.hypernetworks.keys())) + create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks.keys())}, "refresh_train_hypernetwork_name") with FormRow(): embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") @@ -1808,7 +1808,7 @@ def create_ui(): if type(x) == gr.Dropdown: def check_dropdown(val): if getattr(x, 'multiselect', False): - return all([value in x.choices for value in val]) + return all(value in x.choices for value in val) else: return val in x.choices diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 800e467a..ab585917 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -26,7 +26,7 @@ def register_page(page): def fetch_file(filename: str = ""): from starlette.responses import FileResponse - if not any([Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs]): + if not any(Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs): raise ValueError(f"File cannot be fetched: {filename}. 
Must be in one of directories registered by extra pages.") ext = os.path.splitext(filename)[1].lower() @@ -326,7 +326,7 @@ def setup_ui(ui, gallery): is_allowed = False for extra_page in ui.stored_extra_pages: - if any([path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()]): + if any(path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()): is_allowed = True break diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 46fa9cb0..cac73c51 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -23,7 +23,7 @@ def register_tmp_file(gradio, filename): def check_tmp_file(gradio, filename): if hasattr(gradio, 'temp_file_sets'): - return any([filename in fileset for fileset in gradio.temp_file_sets]) + return any(filename in fileset for fileset in gradio.temp_file_sets) if hasattr(gradio, 'temp_dirs'): return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs) -- cgit v1.2.1 From 550256db1ce18778a9d56ff343d844c61b9f9b83 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 11:19:16 +0300 Subject: ruff manual fixes --- modules/api/api.py | 18 ++++++++++++------ modules/codeformer/codeformer_arch.py | 7 +++++-- modules/codeformer/vqgan_arch.py | 4 ++-- modules/generation_parameters_copypaste.py | 4 ++-- modules/models/diffusion/ddpm_edit.py | 14 ++++++++------ modules/models/diffusion/uni_pc/uni_pc.py | 7 +++++-- modules/safe.py | 2 +- modules/sd_samplers_compvis.py | 2 +- modules/textual_inversion/image_embedding.py | 2 +- modules/textual_inversion/learn_schedule.py | 4 ++-- 10 files changed, 39 insertions(+), 25 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index f52d371b..9efb558e 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -34,14 +34,16 @@ import piexif.helper def upscaler_to_index(name: str): try: return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) - except Exception: - raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in shared.sd_upscalers])}") + except Exception as e: + raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in shared.sd_upscalers])}") from e + def script_name_to_index(name, scripts): try: return [script.title().lower() for script in scripts].index(name.lower()) - except Exception: - raise HTTPException(status_code=422, detail=f"Script '{name}' not found") + except Exception as e: + raise HTTPException(status_code=422, detail=f"Script '{name}' not found") from e + def validate_sampler_name(name): config = sd_samplers.all_samplers_map.get(name, None) @@ -50,20 +52,23 @@ def validate_sampler_name(name): return name + def setUpscalers(req: dict): reqDict = vars(req) reqDict['extras_upscaler_1'] = reqDict.pop('upscaler_1', None) reqDict['extras_upscaler_2'] = reqDict.pop('upscaler_2', None) return reqDict + def decode_base64_to_image(encoding): if encoding.startswith("data:image/"): encoding = encoding.split(";")[1].split(",")[1] try: image = Image.open(BytesIO(base64.b64decode(encoding))) return image - except Exception: - raise HTTPException(status_code=500, detail="Invalid encoded image") + except Exception as e: + raise HTTPException(status_code=500, detail="Invalid encoded image") from e + def encode_pil_to_base64(image): with io.BytesIO() as output_bytes: @@ -94,6 +99,7 @@ def encode_pil_to_base64(image): return 
base64.b64encode(bytes_data) + def api_middleware(app: FastAPI): rich_available = True try: diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index 00c407de..ff1c0b4b 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -161,10 +161,13 @@ class Fuse_sft_block(nn.Module): class CodeFormer(VQAutoEncoder): def __init__(self, dim_embd=512, n_head=8, n_layers=9, codebook_size=1024, latent_size=256, - connect_list=['32', '64', '128', '256'], - fix_modules=['quantize','generator']): + connect_list=None, + fix_modules=None): super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest',2, [16], codebook_size) + connect_list = connect_list or ['32', '64', '128', '256'] + fix_modules = fix_modules or ['quantize', 'generator'] + if fix_modules is not None: for module in fix_modules: for param in getattr(self, module).parameters(): diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py index 820e6b12..b24a0394 100644 --- a/modules/codeformer/vqgan_arch.py +++ b/modules/codeformer/vqgan_arch.py @@ -326,7 +326,7 @@ class Generator(nn.Module): @ARCH_REGISTRY.register() class VQAutoEncoder(nn.Module): - def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=[16], codebook_size=1024, emb_dim=256, + def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=None, codebook_size=1024, emb_dim=256, beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None): super().__init__() logger = get_root_logger() @@ -337,7 +337,7 @@ class VQAutoEncoder(nn.Module): self.embed_dim = emb_dim self.ch_mult = ch_mult self.resolution = img_size - self.attn_resolutions = attn_resolutions + self.attn_resolutions = attn_resolutions or [16] self.quantizer_type = quantizer self.encoder = Encoder( self.in_channels, diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index f1c59c46..7fbbe707 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -19,14 +19,14 @@ registered_param_bindings = [] class ParamBinding: - def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=[]): + def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None): self.paste_button = paste_button self.tabname = tabname self.source_text_component = source_text_component self.source_image_component = source_image_component self.source_tabname = source_tabname self.override_settings_component = override_settings_component - self.paste_field_names = paste_field_names + self.paste_field_names = paste_field_names or [] def reset(): diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index 09432117..af4dea15 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -52,7 +52,7 @@ class DDPM(pl.LightningModule): beta_schedule="linear", loss_type="l2", ckpt_path=None, - ignore_keys=[], + ignore_keys=None, load_only_unet=False, monitor="val/loss", use_ema=True, @@ -107,7 +107,7 @@ class DDPM(pl.LightningModule): print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, 
ignore_keys=ignore_keys, only_model=load_only_unet) + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet) # If initialing from EMA-only checkpoint, create EMA model after loading. if self.use_ema and not load_ema: @@ -194,7 +194,9 @@ class DDPM(pl.LightningModule): if context is not None: print(f"{context}: Restored training weights") - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + def init_from_ckpt(self, path, ignore_keys=None, only_model=False): + ignore_keys = ignore_keys or [] + sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] @@ -473,7 +475,7 @@ class LatentDiffusion(DDPM): conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) - super().__init__(conditioning_key=conditioning_key, *args, load_ema=load_ema, **kwargs) + super().__init__(*args, conditioning_key=conditioning_key, load_ema=load_ema, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key @@ -1433,10 +1435,10 @@ class Layout2ImgDiffusion(LatentDiffusion): # TODO: move all layout-specific hacks to this class def __init__(self, cond_stage_key, *args, **kwargs): assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' - super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) + super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs) def log_images(self, batch, N=8, *args, **kwargs): - logs = super().log_images(batch=batch, N=N, *args, **kwargs) + logs = super().log_images(*args, batch=batch, N=N, **kwargs) key = 'train' if self.training else 'validation' dset = self.trainer.datamodule.datasets[key] diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index a4c4ef4e..6f8ad631 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -178,13 +178,13 @@ def model_wrapper( model, noise_schedule, model_type="noise", - model_kwargs={}, + model_kwargs=None, guidance_type="uncond", #condition=None, #unconditional_condition=None, guidance_scale=1., classifier_fn=None, - classifier_kwargs={}, + classifier_kwargs=None, ): """Create a wrapper function for the noise prediction model. @@ -275,6 +275,9 @@ def model_wrapper( A noise prediction model that accepts the noised data and the continuous time as the inputs. """ + model_kwargs = model_kwargs or [] + classifier_kwargs = classifier_kwargs or [] + def get_model_input_time(t_continuous): """ Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. 
diff --git a/modules/safe.py b/modules/safe.py index e6c2f2c0..2d5b972f 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -104,7 +104,7 @@ def check_pt(filename, extra_handler): def load(filename, *args, **kwargs): - return load_with_extra(filename, extra_handler=global_extra_handler, *args, **kwargs) + return load_with_extra(filename, *args, extra_handler=global_extra_handler, **kwargs) def load_with_extra(filename, extra_handler=None, *args, **kwargs): diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py index 7427648f..b1ee3be7 100644 --- a/modules/sd_samplers_compvis.py +++ b/modules/sd_samplers_compvis.py @@ -55,7 +55,7 @@ class VanillaStableDiffusionSampler: def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs): x_dec, ts, cond, unconditional_conditioning = self.before_sample(x_dec, ts, cond, unconditional_conditioning) - res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs) + res = self.orig_p_sample_ddim(x_dec, cond, ts, *args, unconditional_conditioning=unconditional_conditioning, **kwargs) x_dec, ts, cond, unconditional_conditioning, res = self.after_sample(x_dec, ts, cond, unconditional_conditioning, res) diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index ee0e850a..d85a4888 100644 --- a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -17,7 +17,7 @@ class EmbeddingEncoder(json.JSONEncoder): class EmbeddingDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): - json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) + json.JSONDecoder.__init__(self, *args, object_hook=self.object_hook, **kwargs) def object_hook(self, d): if 'TORCHTENSOR' in d: diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py index f63fc72f..fda58898 100644 --- a/modules/textual_inversion/learn_schedule.py +++ b/modules/textual_inversion/learn_schedule.py @@ -32,8 +32,8 @@ class LearnScheduleIterator: self.maxit += 1 return assert self.rates - except (ValueError, AssertionError): - raise Exception('Invalid learning rate schedule. It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.') + except (ValueError, AssertionError) as e: + raise Exception('Invalid learning rate schedule. 
It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.') from e def __iter__(self): -- cgit v1.2.1 From a5121e7a0623db328a9462d340d389ed6737374a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 11:37:18 +0300 Subject: fixes for B007 --- modules/codeformer_model.py | 2 +- modules/esrgan_model.py | 8 ++------ modules/extra_networks.py | 2 +- modules/generation_parameters_copypaste.py | 2 +- modules/hypernetworks/hypernetwork.py | 12 ++++++------ modules/images.py | 2 +- modules/interrogate.py | 4 ++-- modules/prompt_parser.py | 14 +++++++------- modules/safe.py | 4 ++-- modules/scripts.py | 10 +++++----- modules/scripts_postprocessing.py | 8 ++++---- modules/sd_hijack_clip.py | 2 +- modules/shared.py | 6 +++--- modules/textual_inversion/learn_schedule.py | 2 +- modules/textual_inversion/textual_inversion.py | 10 +++++----- modules/ui.py | 6 +++--- modules/ui_extra_networks.py | 2 +- modules/ui_tempdir.py | 2 +- modules/upscaler.py | 2 +- 19 files changed, 48 insertions(+), 52 deletions(-) (limited to 'modules') diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 8e56cb89..ececdbae 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -94,7 +94,7 @@ def setup_model(dirname): self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5) self.face_helper.align_warp_face() - for idx, cropped_face in enumerate(self.face_helper.cropped_faces): + for cropped_face in self.face_helper.cropped_faces: cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True) normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True) cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer) diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index 85aa6934..a009eb42 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -16,9 +16,7 @@ def mod2normal(state_dict): # this code is copied from https://github.com/victorca25/iNNfer if 'conv_first.weight' in state_dict: crt_net = {} - items = [] - for k, v in state_dict.items(): - items.append(k) + items = list(state_dict) crt_net['model.0.weight'] = state_dict['conv_first.weight'] crt_net['model.0.bias'] = state_dict['conv_first.bias'] @@ -52,9 +50,7 @@ def resrgan2normal(state_dict, nb=23): if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict: re8x = 0 crt_net = {} - items = [] - for k, v in state_dict.items(): - items.append(k) + items = list(state_dict) crt_net['model.0.weight'] = state_dict['conv_first.weight'] crt_net['model.0.bias'] = state_dict['conv_first.bias'] diff --git a/modules/extra_networks.py b/modules/extra_networks.py index 1978673d..f9db41bc 100644 --- a/modules/extra_networks.py +++ b/modules/extra_networks.py @@ -91,7 +91,7 @@ def deactivate(p, extra_network_data): """call deactivate for extra networks in extra_network_data in specified order, then call deactivate for all remaining registered networks""" - for extra_network_name, extra_network_args in extra_network_data.items(): + for extra_network_name in extra_network_data: extra_network = extra_network_registry.get(extra_network_name, None) if extra_network is None: continue diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 7fbbe707..b0e945a1 100644 --- a/modules/generation_parameters_copypaste.py +++ 
b/modules/generation_parameters_copypaste.py @@ -247,7 +247,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model lines.append(lastline) lastline = '' - for i, line in enumerate(lines): + for line in lines: line = line.strip() if line.startswith("Negative prompt:"): done_with_prompt = True diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 6ef0bfdf..38ef074f 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -177,34 +177,34 @@ class Hypernetwork: def weights(self): res = [] - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: res += layer.parameters() return res def train(self, mode=True): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.train(mode=mode) for param in layer.parameters(): param.requires_grad = mode def to(self, device): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.to(device) return self def set_multiplier(self, multiplier): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.multiplier = multiplier return self def eval(self): - for k, layers in self.layers.items(): + for layers in self.layers.values(): for layer in layers: layer.eval() for param in layer.parameters(): @@ -619,7 +619,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi try: sd_hijack_checkpoint.add() - for i in range((steps-initial_step) * gradient_step): + for _ in range((steps-initial_step) * gradient_step): if scheduler.finished: break if shared.state.interrupted: diff --git a/modules/images.py b/modules/images.py index 7392cb8b..c4e98c75 100644 --- a/modules/images.py +++ b/modules/images.py @@ -149,7 +149,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0): return ImageFont.truetype(Roboto, fontsize) def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize): - for i, line in enumerate(lines): + for line in lines: fnt = initial_fnt fontsize = initial_fontsize while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0: diff --git a/modules/interrogate.py b/modules/interrogate.py index a1c8e537..111b1322 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -207,8 +207,8 @@ class InterrogateModels: image_features /= image_features.norm(dim=-1, keepdim=True) - for name, topn, items in self.categories(): - matches = self.rank(image_features, items, top_count=topn) + for cat in self.categories(): + matches = self.rank(image_features, cat.items, top_count=cat.topn) for match, score in matches: if shared.opts.interrogate_return_ranks: res += f", ({match}:{score/100:.3f})" diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 3a720721..b4aff704 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -143,7 +143,7 @@ def get_learned_conditioning(model, prompts, steps): conds = model.get_learned_conditioning(texts) cond_schedule = [] - for i, (end_at_step, text) in enumerate(prompt_schedule): + for i, (end_at_step, _) in enumerate(prompt_schedule): cond_schedule.append(ScheduledPromptConditioning(end_at_step, conds[i])) cache[prompt] = cond_schedule @@ -219,8 +219,8 @@ def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_s res = torch.zeros((len(c),) + param.shape, device=param.device, 
dtype=param.dtype) for i, cond_schedule in enumerate(c): target_index = 0 - for current, (end_at, cond) in enumerate(cond_schedule): - if current_step <= end_at: + for current, entry in enumerate(cond_schedule): + if current_step <= entry.end_at_step: target_index = current break res[i] = cond_schedule[target_index].cond @@ -234,13 +234,13 @@ def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step): tensors = [] conds_list = [] - for batch_no, composable_prompts in enumerate(c.batch): + for composable_prompts in c.batch: conds_for_batch = [] - for cond_index, composable_prompt in enumerate(composable_prompts): + for composable_prompt in composable_prompts: target_index = 0 - for current, (end_at, cond) in enumerate(composable_prompt.schedules): - if current_step <= end_at: + for current, entry in enumerate(composable_prompt.schedules): + if current_step <= entry.end_at_step: target_index = current break diff --git a/modules/safe.py b/modules/safe.py index 2d5b972f..1e791c5b 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -95,11 +95,11 @@ def check_pt(filename, extra_handler): except zipfile.BadZipfile: - # if it's not a zip file, it's an olf pytorch format, with five objects written to pickle + # if it's not a zip file, it's an old pytorch format, with five objects written to pickle with open(filename, "rb") as file: unpickler = RestrictedUnpickler(file) unpickler.extra_handler = extra_handler - for i in range(5): + for _ in range(5): unpickler.load() diff --git a/modules/scripts.py b/modules/scripts.py index d945b89f..0c12ebd5 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -231,7 +231,7 @@ def load_scripts(): syspath = sys.path def register_scripts_from_module(module): - for key, script_class in module.__dict__.items(): + for script_class in module.__dict__.values(): if type(script_class) != type: continue @@ -295,9 +295,9 @@ class ScriptRunner: auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data() - for script_class, path, basedir, script_module in auto_processing_scripts + scripts_data: - script = script_class() - script.filename = path + for script_data in auto_processing_scripts + scripts_data: + script = script_data.script_class() + script.filename = script_data.path script.is_txt2img = not is_img2img script.is_img2img = is_img2img @@ -492,7 +492,7 @@ class ScriptRunner: module = script_loading.load_module(script.filename) cache[filename] = module - for key, script_class in module.__dict__.items(): + for script_class in module.__dict__.values(): if type(script_class) == type and issubclass(script_class, Script): self.scripts[si] = script_class() self.scripts[si].filename = filename diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py index b11568c0..6751406c 100644 --- a/modules/scripts_postprocessing.py +++ b/modules/scripts_postprocessing.py @@ -66,9 +66,9 @@ class ScriptPostprocessingRunner: def initialize_scripts(self, scripts_data): self.scripts = [] - for script_class, path, basedir, script_module in scripts_data: - script: ScriptPostprocessing = script_class() - script.filename = path + for script_data in scripts_data: + script: ScriptPostprocessing = script_data.script_class() + script.filename = script_data.path if script.name == "Simple Upscale": continue @@ -124,7 +124,7 @@ class ScriptPostprocessingRunner: script_args = args[script.args_from:script.args_to] process_args = {} - for (name, component), value in zip(script.controls.items(), script_args): + for (name, 
component), value in zip(script.controls.items(), script_args): # noqa B007 process_args[name] = value script.process(pp, **process_args) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 9fa5c5c5..c0c350f6 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -223,7 +223,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): self.hijack.fixes = [x.fixes for x in batch_chunk] for fixes in self.hijack.fixes: - for position, embedding in fixes: + for position, embedding in fixes: # noqa: B007 used_embeddings[embedding.name] = embedding z = self.process_tokens(tokens, multipliers) diff --git a/modules/shared.py b/modules/shared.py index e2691585..913c9e63 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -211,7 +211,7 @@ class OptionInfo: def options_section(section_identifier, options_dict): - for k, v in options_dict.items(): + for v in options_dict.values(): v.section = section_identifier return options_dict @@ -579,7 +579,7 @@ class Options: section_ids = {} settings_items = self.data_labels.items() - for k, item in settings_items: + for _, item in settings_items: if item.section not in section_ids: section_ids[item.section] = len(section_ids) @@ -740,7 +740,7 @@ def walk_files(path, allowed_extensions=None): if allowed_extensions is not None: allowed_extensions = set(allowed_extensions) - for root, dirs, files in os.walk(path): + for root, _, files in os.walk(path): for filename in files: if allowed_extensions is not None: _, ext = os.path.splitext(filename) diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py index fda58898..c56bea45 100644 --- a/modules/textual_inversion/learn_schedule.py +++ b/modules/textual_inversion/learn_schedule.py @@ -12,7 +12,7 @@ class LearnScheduleIterator: self.it = 0 self.maxit = 0 try: - for i, pair in enumerate(pairs): + for pair in pairs: if not pair.strip(): continue tmp = pair.split(':') diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index c37bb2ad..47035332 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -29,7 +29,7 @@ textual_inversion_templates = {} def list_textual_inversion_templates(): textual_inversion_templates.clear() - for root, dirs, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir): + for root, _, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir): for fn in fns: path = os.path.join(root, fn) @@ -198,7 +198,7 @@ class EmbeddingDatabase: if not os.path.isdir(embdir.path): return - for root, dirs, fns in os.walk(embdir.path, followlinks=True): + for root, _, fns in os.walk(embdir.path, followlinks=True): for fn in fns: try: fullfn = os.path.join(root, fn) @@ -215,7 +215,7 @@ class EmbeddingDatabase: def load_textual_inversion_embeddings(self, force_reload=False): if not force_reload: need_reload = False - for path, embdir in self.embedding_dirs.items(): + for embdir in self.embedding_dirs.values(): if embdir.has_changed(): need_reload = True break @@ -228,7 +228,7 @@ class EmbeddingDatabase: self.skipped_embeddings.clear() self.expected_shape = self.get_expected_shape() - for path, embdir in self.embedding_dirs.items(): + for embdir in self.embedding_dirs.values(): self.load_from_dir(embdir) embdir.update() @@ -469,7 +469,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st try: sd_hijack_checkpoint.add() - for i in range((steps-initial_step) * 
gradient_step): + for _ in range((steps-initial_step) * gradient_step): if scheduler.finished: break if shared.state.interrupted: diff --git a/modules/ui.py b/modules/ui.py index 84d661b2..83bfb7d8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -416,7 +416,7 @@ def create_sampler_and_steps_selection(choices, tabname): def ordered_ui_categories(): user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder.split(","))} - for i, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): + for _, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)): yield category @@ -1646,7 +1646,7 @@ def create_ui(): with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: with gr.Row(elem_id="quicksettings", variant="compact"): - for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): + for _i, k, _item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): component = create_setting_component(k, is_quicksettings=True) component_dict[k] = component @@ -1673,7 +1673,7 @@ def create_ui(): outputs=[text_settings, result], ) - for i, k, item in quicksettings_list: + for _i, k, _item in quicksettings_list: component = component_dict[k] info = opts.data_labels[k] diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index ab585917..2fd82e8e 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -90,7 +90,7 @@ class ExtraNetworksPage: subdirs = {} for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]: - for root, dirs, files in os.walk(parentdir): + for root, dirs, _ in os.walk(parentdir): for dirname in dirs: x = os.path.join(root, dirname) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index cac73c51..f05049e1 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -72,7 +72,7 @@ def cleanup_tmpdr(): if temp_dir == "" or not os.path.isdir(temp_dir): return - for root, dirs, files in os.walk(temp_dir, topdown=False): + for root, _, files in os.walk(temp_dir, topdown=False): for name in files: _, extension = os.path.splitext(name) if extension != ".png": diff --git a/modules/upscaler.py b/modules/upscaler.py index e145be30..8acb6e96 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -55,7 +55,7 @@ class Upscaler: dest_w = int(img.width * scale) dest_h = int(img.height * scale) - for i in range(3): + for _ in range(3): shape = (img.width, img.height) img = self.do_upscale(img, selected_model) -- cgit v1.2.1 From d25219b7e889cf34bccae9cb88497708796efda2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 11:55:09 +0300 Subject: manual fixes for some C408 --- modules/api/api.py | 2 +- modules/models/diffusion/ddpm_edit.py | 8 ++++---- modules/models/diffusion/uni_pc/uni_pc.py | 4 ++-- modules/sd_hijack_inpainting.py | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 9efb558e..594fa655 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -165,7 +165,7 @@ def api_middleware(app: FastAPI): class Api: def __init__(self, app: FastAPI, queue_lock: Lock): if shared.cmd_opts.api_auth: - self.credentials = dict() + self.credentials = {} for auth in shared.cmd_opts.api_auth.split(","): user, password = auth.split(":") self.credentials[user] = password 
diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index af4dea15..3fb76b65 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -405,7 +405,7 @@ class DDPM(pl.LightningModule): @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): - log = dict() + log = {} x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) @@ -413,7 +413,7 @@ class DDPM(pl.LightningModule): log["inputs"] = x # get diffusion row - diffusion_row = list() + diffusion_row = [] x_start = x[:n_row] for t in range(self.num_timesteps): @@ -1263,7 +1263,7 @@ class LatentDiffusion(DDPM): use_ddim = False - log = dict() + log = {} z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, @@ -1291,7 +1291,7 @@ class LatentDiffusion(DDPM): if plot_diffusion_rows: # get diffusion row - diffusion_row = list() + diffusion_row = [] z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index 6f8ad631..f6c49f87 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -344,7 +344,7 @@ def model_wrapper( t_in = torch.cat([t_continuous] * 2) if isinstance(condition, dict): assert isinstance(unconditional_condition, dict) - c_in = dict() + c_in = {} for k in condition: if isinstance(condition[k], list): c_in[k] = [torch.cat([ @@ -355,7 +355,7 @@ def model_wrapper( unconditional_condition[k], condition[k]]) elif isinstance(condition, list): - c_in = list() + c_in = [] assert isinstance(unconditional_condition, list) for i in range(len(condition)): c_in.append(torch.cat([unconditional_condition[i], condition[i]])) diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 058575b7..c1977b19 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -23,7 +23,7 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F if isinstance(c, dict): assert isinstance(unconditional_conditioning, dict) - c_in = dict() + c_in = {} for k in c: if isinstance(c[k], list): c_in[k] = [ -- cgit v1.2.1 From 3ec7b705c78b7aca9569c92a419837352c7a4ec6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 21:21:32 +0300 Subject: suggestions and fixes from the PR --- modules/codeformer/codeformer_arch.py | 7 ++----- modules/hypernetworks/ui.py | 4 ++-- modules/models/diffusion/uni_pc/uni_pc.py | 4 ++-- modules/scripts_postprocessing.py | 2 +- modules/sd_hijack_clip.py | 2 +- modules/shared.py | 2 +- modules/textual_inversion/textual_inversion.py | 3 +-- modules/ui.py | 4 ++-- 8 files changed, 12 insertions(+), 16 deletions(-) (limited to 'modules') diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index ff1c0b4b..45c70f84 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -161,13 +161,10 @@ class Fuse_sft_block(nn.Module): class CodeFormer(VQAutoEncoder): def __init__(self, dim_embd=512, n_head=8, n_layers=9, codebook_size=1024, latent_size=256, - connect_list=None, - fix_modules=None): + connect_list=('32', '64', '128', '256'), + fix_modules=('quantize', 'generator')): super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 
'nearest',2, [16], codebook_size) - connect_list = connect_list or ['32', '64', '128', '256'] - fix_modules = fix_modules or ['quantize', 'generator'] - if fix_modules is not None: for module in fix_modules: for param in getattr(self, module).parameters(): diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py index e3f9eb13..8b6255e2 100644 --- a/modules/hypernetworks/ui.py +++ b/modules/hypernetworks/ui.py @@ -5,13 +5,13 @@ import modules.hypernetworks.hypernetwork from modules import devices, sd_hijack, shared not_available = ["hardswish", "multiheadattention"] -keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available] +keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available] def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure) - return gr.Dropdown.update(choices=sorted(shared.hypernetworks.keys())), f"Created: {filename}", "" + return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", "" def train_hypernetwork(*args): diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index f6c49f87..a227b947 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -275,8 +275,8 @@ def model_wrapper( A noise prediction model that accepts the noised data and the continuous time as the inputs. 
""" - model_kwargs = model_kwargs or [] - classifier_kwargs = classifier_kwargs or [] + model_kwargs = model_kwargs or {} + classifier_kwargs = classifier_kwargs or {} def get_model_input_time(t_continuous): """ diff --git a/modules/scripts_postprocessing.py b/modules/scripts_postprocessing.py index 6751406c..bac1335d 100644 --- a/modules/scripts_postprocessing.py +++ b/modules/scripts_postprocessing.py @@ -124,7 +124,7 @@ class ScriptPostprocessingRunner: script_args = args[script.args_from:script.args_to] process_args = {} - for (name, component), value in zip(script.controls.items(), script_args): # noqa B007 + for (name, _component), value in zip(script.controls.items(), script_args): process_args[name] = value script.process(pp, **process_args) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index c0c350f6..cc6e8c21 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -223,7 +223,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): self.hijack.fixes = [x.fixes for x in batch_chunk] for fixes in self.hijack.fixes: - for position, embedding in fixes: # noqa: B007 + for _position, embedding in fixes: used_embeddings[embedding.name] = embedding z = self.process_tokens(tokens, multipliers) diff --git a/modules/shared.py b/modules/shared.py index 913c9e63..ac67adc0 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -381,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"), "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"), "extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(hypernetworks.keys())}, refresh=reload_hypernetworks), + "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", hypernetworks]}, refresh=reload_hypernetworks), })) options_templates.update(options_section(('ui', "User interface"), { diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 47035332..9e1b2b9a 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -166,8 +166,7 @@ class EmbeddingDatabase: # textual inversion embeddings if 'string_to_param' in data: param_dict = data['string_to_param'] - if hasattr(param_dict, '_parameters'): - param_dict = param_dict._parameters # fix for torch 1.12.1 loading saved file from torch 1.11 + param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11 assert len(param_dict) == 1, 'embedding file has multiple terms in it' emb = next(iter(param_dict.items()))[1] # diffuser concepts diff --git a/modules/ui.py b/modules/ui.py index 83bfb7d8..7ee99473 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1230,8 +1230,8 @@ def create_ui(): train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - train_hypernetwork_name = 
gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=list(shared.hypernetworks.keys())) - create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks.keys())}, "refresh_train_hypernetwork_name") + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks)) + create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name") with FormRow(): embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") -- cgit v1.2.1 From 8aa87c564a79965013715d56a5f90d2a34d5d6ee Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 10 May 2023 23:41:08 +0300 Subject: add UI to edit defaults allow setting defaults for elements in extensions' tabs fix a problem with ESRGAN upscalers disappearing after UI reload implicit change: HTML element id for train tab from tab_ti to tab_train (will this break things?) --- modules/modelloader.py | 27 +++---- modules/ui.py | 122 +++++------------------------ modules/ui_loadsave.py | 208 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 237 insertions(+), 120 deletions(-) create mode 100644 modules/ui_loadsave.py (limited to 'modules') diff --git a/modules/modelloader.py b/modules/modelloader.py index 25612bf8..2a479bcb 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -116,20 +116,6 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None): pass -builtin_upscaler_classes = [] -forbidden_upscaler_classes = set() - - -def list_builtin_upscalers(): - builtin_upscaler_classes.clear() - builtin_upscaler_classes.extend(Upscaler.__subclasses__()) - -def forbid_loaded_nonbuiltin_upscalers(): - for cls in Upscaler.__subclasses__(): - if cls not in builtin_upscaler_classes: - forbidden_upscaler_classes.add(cls) - - def load_upscalers(): # We can only do this 'magic' method to dynamically load upscalers if they are referenced, # so we'll try to import any _model.py files before looking in __subclasses__ @@ -145,10 +131,17 @@ def load_upscalers(): datas = [] commandline_options = vars(shared.cmd_opts) - for cls in Upscaler.__subclasses__(): - if cls in forbidden_upscaler_classes: - continue + # some of upscaler classes will not go away after reloading their modules, and we'll end + # up with two copies of those classes. 
The newest copy will always be the last in the list, + # so we go from end to beginning and ignore duplicates + used_classes = {} + for cls in reversed(Upscaler.__subclasses__()): + classname = str(cls) + if classname not in used_classes: + used_classes[classname] = cls + + for cls in reversed(used_classes.values()): name = cls.__name__ cmd_name = f"{name.lower().replace('upscaler', '')}_models_path" scaler = cls(commandline_options.get(cmd_name, None)) diff --git a/modules/ui.py b/modules/ui.py index 7ee99473..1efb656a 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -13,7 +13,7 @@ import numpy as np from PIL import Image, PngImagePlugin # noqa: F401 from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import script_path, data_path @@ -86,16 +86,6 @@ def send_gradio_gallery_to_image(x): return None return image_from_url_text(x[0]) -def visit(x, func, path=""): - if hasattr(x, 'children'): - if isinstance(x, gr.Tabs) and x.elem_id is not None: - # Tabs element can't have a label, have to use elem_id instead - func(f"{path}/Tabs@{x.elem_id}", x) - for c in x.children: - visit(c, func, path) - elif x.label is not None: - func(f"{path}/{x.label}", x) - def add_style(name: str, prompt: str, negative_prompt: str): if name is None: @@ -1471,6 +1461,8 @@ def create_ui(): return res + loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) + components = [] component_dict = {} shared.settings_components = component_dict @@ -1558,6 +1550,9 @@ def create_ui(): current_row.__exit__() current_tab.__exit__() + with gr.TabItem("Defaults", id="defaults", elem_id="settings_tab_defaults"): + loadsave.create_ui() + with gr.TabItem("Actions", id="actions", elem_id="settings_tab_actions"): request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications") download_localization = gr.Button(value='Download localization template', elem_id="download_localization") @@ -1631,7 +1626,7 @@ def create_ui(): (extras_interface, "Extras", "extras"), (pnginfo_interface, "PNG Info", "pnginfo"), (modelmerger_interface, "Checkpoint Merger", "modelmerger"), - (train_interface, "Train", "ti"), + (train_interface, "Train", "train"), ] interfaces += script_callbacks.ui_tabs_callback() @@ -1659,6 +1654,16 @@ def create_ui(): with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): interface.render() + for interface, _label, ifid in interfaces: + if ifid in ["extensions", "settings"]: + continue + + loadsave.add_block(interface, ifid) + + loadsave.add_component(f"webui/Tabs@{tabs.elem_id}", tabs) + + loadsave.setup_ui() + if os.path.exists(os.path.join(script_path, "notification.mp3")): gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) @@ -1747,97 +1752,8 @@ def create_ui(): ] ) - ui_config_file = cmd_opts.ui_config_file - ui_settings = {} - settings_count = len(ui_settings) - error_loading = False - - try: - if os.path.exists(ui_config_file): - with open(ui_config_file, "r", encoding="utf8") as file: - ui_settings = json.load(file) - except Exception: - 
error_loading = True - print("Error loading settings:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - def loadsave(path, x): - def apply_field(obj, field, condition=None, init_field=None): - key = f"{path}/{field}" - - if getattr(obj, 'custom_script_source', None) is not None: - key = f"customscript/{obj.custom_script_source}/{key}" - - if getattr(obj, 'do_not_save_to_config', False): - return - - saved_value = ui_settings.get(key, None) - if saved_value is None: - ui_settings[key] = getattr(obj, field) - elif condition and not condition(saved_value): - pass - - # this warning is generally not useful; - # print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.') - else: - setattr(obj, field, saved_value) - if init_field is not None: - init_field(saved_value) - - if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown, ToolButton] and x.visible: - apply_field(x, 'visible') - - if type(x) == gr.Slider: - apply_field(x, 'value') - apply_field(x, 'minimum') - apply_field(x, 'maximum') - apply_field(x, 'step') - - if type(x) == gr.Radio: - apply_field(x, 'value', lambda val: val in x.choices) - - if type(x) == gr.Checkbox: - apply_field(x, 'value') - - if type(x) == gr.Textbox: - apply_field(x, 'value') - - if type(x) == gr.Number: - apply_field(x, 'value') - - if type(x) == gr.Dropdown: - def check_dropdown(val): - if getattr(x, 'multiselect', False): - return all(value in x.choices for value in val) - else: - return val in x.choices - - apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None)) - - def check_tab_id(tab_id): - tab_items = list(filter(lambda e: isinstance(e, gr.TabItem), x.children)) - if type(tab_id) == str: - tab_ids = [t.id for t in tab_items] - return tab_id in tab_ids - elif type(tab_id) == int: - return tab_id >= 0 and tab_id < len(tab_items) - else: - return False - - if type(x) == gr.Tabs: - apply_field(x, 'selected', check_tab_id) - - visit(txt2img_interface, loadsave, "txt2img") - visit(img2img_interface, loadsave, "img2img") - visit(extras_interface, loadsave, "extras") - visit(modelmerger_interface, loadsave, "modelmerger") - visit(train_interface, loadsave, "train") - - loadsave(f"webui/Tabs@{tabs.elem_id}", tabs) - - if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)): - with open(ui_config_file, "w", encoding="utf8") as file: - json.dump(ui_settings, file, indent=4) + loadsave.dump_defaults() + demo.ui_loadsave = loadsave # Required as a workaround for change() event not triggering when loading values from ui-config.json interp_description.value = update_interp_description(interp_method.value) diff --git a/modules/ui_loadsave.py b/modules/ui_loadsave.py new file mode 100644 index 00000000..728fec9e --- /dev/null +++ b/modules/ui_loadsave.py @@ -0,0 +1,208 @@ +import json +import os + +import gradio as gr + +from modules import errors +from modules.ui_components import ToolButton + + +class UiLoadsave: + """allows saving and restorig default values for gradio components""" + + def __init__(self, filename): + self.filename = filename + self.ui_settings = {} + self.component_mapping = {} + self.error_loading = False + self.finalized_ui = False + + self.ui_defaults_view = None + self.ui_defaults_apply = None + self.ui_defaults_review = None + + try: + if os.path.exists(self.filename): + self.ui_settings = self.read_from_file() + except Exception as e: + self.error_loading = True + 
errors.display(e, "loading settings") + + def add_component(self, path, x): + """adds component to the registry of tracked components""" + + assert not self.finalized_ui + + def apply_field(obj, field, condition=None, init_field=None): + key = f"{path}/{field}" + + if getattr(obj, 'custom_script_source', None) is not None: + key = f"customscript/{obj.custom_script_source}/{key}" + + if getattr(obj, 'do_not_save_to_config', False): + return + + saved_value = self.ui_settings.get(key, None) + if saved_value is None: + self.ui_settings[key] = getattr(obj, field) + elif condition and not condition(saved_value): + pass + else: + setattr(obj, field, saved_value) + if init_field is not None: + init_field(saved_value) + + if field == 'value' and key not in self.component_mapping: + self.component_mapping[key] = x + + if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown, ToolButton] and x.visible: + apply_field(x, 'visible') + + if type(x) == gr.Slider: + apply_field(x, 'value') + apply_field(x, 'minimum') + apply_field(x, 'maximum') + apply_field(x, 'step') + + if type(x) == gr.Radio: + apply_field(x, 'value', lambda val: val in x.choices) + + if type(x) == gr.Checkbox: + apply_field(x, 'value') + + if type(x) == gr.Textbox: + apply_field(x, 'value') + + if type(x) == gr.Number: + apply_field(x, 'value') + + if type(x) == gr.Dropdown: + def check_dropdown(val): + if getattr(x, 'multiselect', False): + return all(value in x.choices for value in val) + else: + return val in x.choices + + apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None)) + + def check_tab_id(tab_id): + tab_items = list(filter(lambda e: isinstance(e, gr.TabItem), x.children)) + if type(tab_id) == str: + tab_ids = [t.id for t in tab_items] + return tab_id in tab_ids + elif type(tab_id) == int: + return 0 <= tab_id < len(tab_items) + else: + return False + + if type(x) == gr.Tabs: + apply_field(x, 'selected', check_tab_id) + + def add_block(self, x, path=""): + """adds all components inside a gradio block x to the registry of tracked components""" + + if hasattr(x, 'children'): + if isinstance(x, gr.Tabs) and x.elem_id is not None: + # Tabs element can't have a label, have to use elem_id instead + self.add_component(f"{path}/Tabs@{x.elem_id}", x) + for c in x.children: + self.add_block(c, path) + elif x.label is not None: + self.add_component(f"{path}/{x.label}", x) + + def read_from_file(self): + with open(self.filename, "r", encoding="utf8") as file: + return json.load(file) + + def write_to_file(self, current_ui_settings): + with open(self.filename, "w", encoding="utf8") as file: + json.dump(current_ui_settings, file, indent=4) + + def dump_defaults(self): + """saves default values to a file unless tjhe file is present and there was an error loading default values at start""" + + if self.error_loading and os.path.exists(self.filename): + return + + self.write_to_file(self.ui_settings) + + def iter_changes(self, current_ui_settings, values): + """ + given a dictionary with defaults from a file and current values from gradio elements, returns + an iterator over tuples of values that are not the same between the file and the current; + tuple contents are: path, old value, new value + """ + + for (path, component), new_value in zip(self.component_mapping.items(), values): + old_value = current_ui_settings.get(path) + + choices = getattr(component, 'choices', None) + if isinstance(new_value, int) and choices: + if new_value >= len(choices): + continue + + new_value = choices[new_value] 
+ + if new_value == old_value: + continue + + if old_value is None and new_value == '' or new_value == []: + continue + + yield path, old_value, new_value + + def ui_view(self, *values): + text = [""] + + for path, old_value, new_value in self.iter_changes(self.read_from_file(), values): + if old_value is None: + old_value = "None" + + text.append(f"") + + if len(text) == 1: + text.append("") + + text.append("") + return "".join(text) + + def ui_apply(self, *values): + num_changed = 0 + + current_ui_settings = self.read_from_file() + + for path, _, new_value in self.iter_changes(current_ui_settings.copy(), values): + num_changed += 1 + current_ui_settings[path] = new_value + + if num_changed == 0: + return "No changes." + + self.write_to_file(current_ui_settings) + + return f"Wrote {num_changed} changes." + + def create_ui(self): + """creates ui elements for editing defaults UI, without adding any logic to them""" + + gr.HTML( + f"This page allows you to change default values in UI elements on other tabs.
" + f"Make your changes, press 'View changes' to review the changed default values,
" + f"then press 'Apply' to write them to {self.filename}.
" + f"New defaults will apply after you restart the UI.
" + ) + + with gr.Row(): + self.ui_defaults_view = gr.Button(value='View changes', elem_id="ui_defaults_view", variant="secondary") + self.ui_defaults_apply = gr.Button(value='Apply', elem_id="ui_defaults_apply", variant="primary") + + self.ui_defaults_review = gr.HTML("") + + def setup_ui(self): + """adds logic to elements created with create_ui; all add_block class must be made before this""" + + assert not self.finalized_ui + self.finalized_ui = True + + self.ui_defaults_view.click(fn=self.ui_view, inputs=list(self.component_mapping.values()), outputs=[self.ui_defaults_review]) + self.ui_defaults_apply.click(fn=self.ui_apply, inputs=list(self.component_mapping.values()), outputs=[self.ui_defaults_review]) -- cgit v1.2.1 From c8732dfa6f763332962d97ff040af156e24a9e62 Mon Sep 17 00:00:00 2001 From: Louis Del Valle <92354925+nero-dv@users.noreply.github.com> Date: Wed, 10 May 2023 22:05:18 -0500 Subject: Update sub_quadratic_attention.py 1. Determine the number of query chunks. 2. Calculate the final shape of the res tensor. 3. Initialize the tensor with the calculated shape and dtype, (same dtype as the input tensors, usually) Can initialize the tensor as a zero-filled tensor with the correct shape and dtype, then compute the attention scores for each query chunk and fill the corresponding slice of tensor. --- modules/sub_quadratic_attention.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py index 05595323..f80c1600 100644 --- a/modules/sub_quadratic_attention.py +++ b/modules/sub_quadratic_attention.py @@ -202,13 +202,22 @@ def efficient_dot_product_attention( value=value, ) - # TODO: maybe we should use torch.empty_like(query) to allocate storage in-advance, - # and pass slices to be mutated, instead of torch.cat()ing the returned slices - res = torch.cat([ - compute_query_chunk_attn( + # slices of res tensor are mutable, modifications made + # to the slices will affect the original tensor. 
+ # if output of compute_query_chunk_attn function has same number of + # dimensions as input query tensor, we initialize tensor like this: + num_query_chunks = int(np.ceil(q_tokens / query_chunk_size)) + query_shape = get_query_chunk(0).shape + res_shape = (query_shape[0], query_shape[1] * num_query_chunks, *query_shape[2:]) + res_dtype = get_query_chunk(0).dtype + res = torch.zeros(res_shape, dtype=res_dtype) + + for i in range(num_query_chunks): + attn_scores = compute_query_chunk_attn( query=get_query_chunk(i * query_chunk_size), key=key, value=value, - ) for i in range(math.ceil(q_tokens / query_chunk_size)) - ], dim=1) + ) + res[:, i * query_chunk_size:(i + 1) * query_chunk_size, :] = attn_scores + return res -- cgit v1.2.1 From ae17e97898af8dd776b20e104ba9a81fe699e4df Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Thu, 11 May 2023 12:26:04 +0800 Subject: UniPC progress bar adjustment --- modules/models/diffusion/uni_pc/uni_pc.py | 70 ++++++++++++++++--------------- 1 file changed, 37 insertions(+), 33 deletions(-) (limited to 'modules') diff --git a/modules/models/diffusion/uni_pc/uni_pc.py b/modules/models/diffusion/uni_pc/uni_pc.py index eb5f4e76..1d1b07bd 100644 --- a/modules/models/diffusion/uni_pc/uni_pc.py +++ b/modules/models/diffusion/uni_pc/uni_pc.py @@ -1,7 +1,7 @@ import torch import torch.nn.functional as F import math -from tqdm.auto import trange +import tqdm class NoiseScheduleVP: @@ -757,40 +757,44 @@ class UniPC: vec_t = timesteps[0].expand((x.shape[0])) model_prev_list = [self.model_fn(x, vec_t)] t_prev_list = [vec_t] - # Init the first `order` values by lower order multistep DPM-Solver. - for init_order in range(1, order): - vec_t = timesteps[init_order].expand(x.shape[0]) - x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, init_order, use_corrector=True) - if model_x is None: - model_x = self.model_fn(x, vec_t) - if self.after_update is not None: - self.after_update(x, model_x) - model_prev_list.append(model_x) - t_prev_list.append(vec_t) - for step in trange(order, steps + 1): - vec_t = timesteps[step].expand(x.shape[0]) - if lower_order_final: - step_order = min(order, steps + 1 - step) - else: - step_order = order - #print('this step order:', step_order) - if step == steps: - #print('do not run corrector at the last step') - use_corrector = False - else: - use_corrector = True - x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, step_order, use_corrector=use_corrector) - if self.after_update is not None: - self.after_update(x, model_x) - for i in range(order - 1): - t_prev_list[i] = t_prev_list[i + 1] - model_prev_list[i] = model_prev_list[i + 1] - t_prev_list[-1] = vec_t - # We do not need to evaluate the final model value. - if step < steps: + with tqdm.tqdm(total=steps) as pbar: + # Init the first `order` values by lower order multistep DPM-Solver. 
+ for init_order in range(1, order): + vec_t = timesteps[init_order].expand(x.shape[0]) + x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, init_order, use_corrector=True) if model_x is None: model_x = self.model_fn(x, vec_t) - model_prev_list[-1] = model_x + if self.after_update is not None: + self.after_update(x, model_x) + model_prev_list.append(model_x) + t_prev_list.append(vec_t) + pbar.update() + + for step in range(order, steps + 1): + vec_t = timesteps[step].expand(x.shape[0]) + if lower_order_final: + step_order = min(order, steps + 1 - step) + else: + step_order = order + #print('this step order:', step_order) + if step == steps: + #print('do not run corrector at the last step') + use_corrector = False + else: + use_corrector = True + x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, step_order, use_corrector=use_corrector) + if self.after_update is not None: + self.after_update(x, model_x) + for i in range(order - 1): + t_prev_list[i] = t_prev_list[i + 1] + model_prev_list[i] = model_prev_list[i + 1] + t_prev_list[-1] = vec_t + # We do not need to evaluate the final model value. + if step < steps: + if model_x is None: + model_x = self.model_fn(x, vec_t) + model_prev_list[-1] = model_x + pbar.update() else: raise NotImplementedError() if denoise_to_zero: -- cgit v1.2.1 From e334758ec281eaf7723c806713721d12bb568e24 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 11 May 2023 07:45:05 +0300 Subject: repair #10266 --- modules/sub_quadratic_attention.py | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) (limited to 'modules') diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py index f80c1600..cc38debd 100644 --- a/modules/sub_quadratic_attention.py +++ b/modules/sub_quadratic_attention.py @@ -201,23 +201,15 @@ def efficient_dot_product_attention( key=key, value=value, ) - - # slices of res tensor are mutable, modifications made - # to the slices will affect the original tensor. 
- # if output of compute_query_chunk_attn function has same number of - # dimensions as input query tensor, we initialize tensor like this: - num_query_chunks = int(np.ceil(q_tokens / query_chunk_size)) - query_shape = get_query_chunk(0).shape - res_shape = (query_shape[0], query_shape[1] * num_query_chunks, *query_shape[2:]) - res_dtype = get_query_chunk(0).dtype - res = torch.zeros(res_shape, dtype=res_dtype) - - for i in range(num_query_chunks): + + res = torch.zeros_like(query) + for i in range(math.ceil(q_tokens / query_chunk_size)): attn_scores = compute_query_chunk_attn( query=get_query_chunk(i * query_chunk_size), key=key, value=value, ) - res[:, i * query_chunk_size:(i + 1) * query_chunk_size, :] = attn_scores + + res[:, i * query_chunk_size:i * query_chunk_size + attn_scores.shape[1], :] = attn_scores return res -- cgit v1.2.1 From b7e160a87d07b2fd1c12812c43786e141cc86bd5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 11 May 2023 08:14:45 +0300 Subject: change live preview format to jpeg to prevent unreasonably slow previews for large images, and add an option to let user select the format --- modules/progress.py | 4 ++-- modules/shared.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/progress.py b/modules/progress.py index 948e6f00..289dd311 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -95,9 +95,9 @@ def progressapi(req: ProgressRequest): image = shared.state.current_image if image is not None: buffered = io.BytesIO() - image.save(buffered, format="png") + image.save(buffered, format=opts.live_previews_format) base64_image = base64.b64encode(buffered.getvalue()).decode('ascii') - live_preview = f"data:image/png;base64,{base64_image}" + live_preview = f"data:image/{opts.live_previews_format};base64,{base64_image}" id_live_preview = shared.state.id_live_preview else: live_preview = None diff --git a/modules/shared.py b/modules/shared.py index ac67adc0..fc39161e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -420,6 +420,7 @@ options_templates.update(options_section(('infotext', "Infotext"), { options_templates.update(options_section(('ui', "Live previews"), { "show_progressbar": OptionInfo(True, "Show progressbar"), "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), + "live_previews_format": OptionInfo("jpeg", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), "show_progress_every_n_steps": OptionInfo(10, "Show new live preview image every N sampling steps. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}), "show_progress_type": OptionInfo("Approx NN", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}), -- cgit v1.2.1 From 1dcd6723242c3d691610f9ed937951baea49c2d1 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Thu, 11 May 2023 14:29:52 +0800 Subject: Update sd_vae.py There is no need to use split. 
--- modules/sd_vae.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 17d1f702..95262ca3 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -88,10 +88,9 @@ def refresh_vae_list(): def find_vae_near_checkpoint(checkpoint_file): - checkpoint_path = os.path.basename(checkpoint_file).split('.', 1)[0] + checkpoint_path = os.path.basename(checkpoint_file).rsplit('.', 1)[0] for vae_file in vae_dict.values(): - vae_path = os.path.basename(vae_file).split('.', 1)[0] - if vae_path == checkpoint_path: + if os.path.basename(vae_file).startswith(checkpoint_path): return vae_file return None -- cgit v1.2.1 From 16e4d791224125cef2b91f7cf39893ceffd8bd74 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 10:05:39 +0300 Subject: paths_internal: deduplicate modules_path --- modules/paths_internal.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/paths_internal.py b/modules/paths_internal.py index 6765bafe..a3d3e1f8 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -3,7 +3,8 @@ import argparse import os -script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +modules_path = os.path.dirname(os.path.realpath(__file__)) +script_path = os.path.dirname(modules_path) sd_configs_path = os.path.join(script_path, "configs") sd_default_config = os.path.join(sd_configs_path, "v1-inference.yaml") @@ -12,7 +13,7 @@ default_sd_model_file = sd_model_file # Parse the --data-dir flag first so we can use it as a base for our other argument default values parser_pre = argparse.ArgumentParser(add_help=False) -parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored",) +parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(modules_path), help="base path where all user data is stored", ) cmd_opts_pre = parser_pre.parse_known_args()[0] data_path = cmd_opts_pre.data_dir -- cgit v1.2.1 From df7070eca22278b25c921ef72d3f97a221d66242 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 10:06:19 +0300 Subject: Deduplicate get_font code --- modules/images.py | 13 +++++++------ modules/textual_inversion/image_embedding.py | 9 ++------- 2 files changed, 9 insertions(+), 13 deletions(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index c4e98c75..d8527179 100644 --- a/modules/images.py +++ b/modules/images.py @@ -24,6 +24,13 @@ from modules.shared import opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) +def get_font(fontsize: int): + try: + return ImageFont.truetype(opts.font or Roboto, fontsize) + except Exception: + return ImageFont.truetype(Roboto, fontsize) + + def image_grid(imgs, batch_size=1, rows=None): if rows is None: if opts.n_rows > 0: @@ -142,12 +149,6 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0): lines.append(word) return lines - def get_font(fontsize): - try: - return ImageFont.truetype(opts.font or Roboto, fontsize) - except Exception: - return ImageFont.truetype(Roboto, fontsize) - def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize): for line in lines: fnt = initial_fnt diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index d85a4888..5858a55f 100644 --- 
a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -3,9 +3,7 @@ import json import numpy as np import zlib from PIL import Image, ImageDraw, ImageFont -from fonts.ttf import Roboto import torch -from modules.shared import opts class EmbeddingEncoder(json.JSONEncoder): @@ -136,11 +134,8 @@ def caption_image_overlay(srcimage, title, footerLeft, footerMid, footerRight, t image = srcimage.copy() fontsize = 32 if textfont is None: - try: - textfont = ImageFont.truetype(opts.font or Roboto, fontsize) - textfont = opts.font or Roboto - except Exception: - textfont = Roboto + from modules.images import get_font + textfont = get_font(fontsize) factor = 1.5 gradient = Image.new('RGBA', (1, image.size[1]), color=(0, 0, 0, 0)) -- cgit v1.2.1 From 1332c46b71b169b889d7df420f3285d9022da5cc Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 10:07:01 +0300 Subject: Drop fonts + font-roboto deps since we only use the single regular cut of Roboto --- modules/Roboto-Regular.ttf | Bin 0 -> 305608 bytes modules/images.py | 6 +++--- modules/paths_internal.py | 2 ++ 3 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 modules/Roboto-Regular.ttf (limited to 'modules') diff --git a/modules/Roboto-Regular.ttf b/modules/Roboto-Regular.ttf new file mode 100644 index 00000000..500b1045 Binary files /dev/null and b/modules/Roboto-Regular.ttf differ diff --git a/modules/images.py b/modules/images.py index d8527179..3b8b62d9 100644 --- a/modules/images.py +++ b/modules/images.py @@ -13,12 +13,12 @@ import numpy as np import piexif import piexif.helper from PIL import Image, ImageFont, ImageDraw, PngImagePlugin -from fonts.ttf import Roboto import string import json import hashlib from modules import sd_samplers, shared, script_callbacks, errors +from modules.paths_internal import roboto_ttf_file from modules.shared import opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) @@ -26,9 +26,9 @@ LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.L def get_font(fontsize: int): try: - return ImageFont.truetype(opts.font or Roboto, fontsize) + return ImageFont.truetype(opts.font or roboto_ttf_file, fontsize) except Exception: - return ImageFont.truetype(Roboto, fontsize) + return ImageFont.truetype(roboto_ttf_file, fontsize) def image_grid(imgs, batch_size=1, rows=None): diff --git a/modules/paths_internal.py b/modules/paths_internal.py index a3d3e1f8..a23f6d70 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -22,3 +22,5 @@ models_path = os.path.join(data_path, "models") extensions_dir = os.path.join(data_path, "extensions") extensions_builtin_dir = os.path.join(script_path, "extensions-builtin") config_states_dir = os.path.join(script_path, "config_states") + +roboto_ttf_file = os.path.join(modules_path, 'Roboto-Regular.ttf') -- cgit v1.2.1 From 0bfaf613a84613f41946da02571e0e467e88d273 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 11 May 2023 13:30:33 +0300 Subject: put the star where it belongs --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index fc39161e..f387b5ae 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -381,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"), "extra_networks_card_height": 
OptionInfo(0, "Card height for Extra Networks (px)"), "extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"), - "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", hypernetworks]}, refresh=reload_hypernetworks), + "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), })) options_templates.update(options_section(('ui', "User interface"), { -- cgit v1.2.1 From 098d2fda5250f9f418fc641bd0f185cb461ee6d9 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 18:13:35 +0300 Subject: Reindent autocrop with 4 spaces --- modules/textual_inversion/autocrop.py | 202 +++++++++++++++++----------------- 1 file changed, 102 insertions(+), 100 deletions(-) (limited to 'modules') diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index 7770d22f..8e667a4d 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -10,63 +10,64 @@ RED = "#F00" def crop_image(im, settings): - """ Intelligently crop an image to the subject matter """ - - scale_by = 1 - if is_landscape(im.width, im.height): - scale_by = settings.crop_height / im.height - elif is_portrait(im.width, im.height): - scale_by = settings.crop_width / im.width - elif is_square(im.width, im.height): - if is_square(settings.crop_width, settings.crop_height): - scale_by = settings.crop_width / im.width - elif is_landscape(settings.crop_width, settings.crop_height): - scale_by = settings.crop_width / im.width - elif is_portrait(settings.crop_width, settings.crop_height): - scale_by = settings.crop_height / im.height - - im = im.resize((int(im.width * scale_by), int(im.height * scale_by))) - im_debug = im.copy() - - focus = focal_point(im_debug, settings) - - # take the focal point and turn it into crop coordinates that try to center over the focal - # point but then get adjusted back into the frame - y_half = int(settings.crop_height / 2) - x_half = int(settings.crop_width / 2) - - x1 = focus.x - x_half - if x1 < 0: - x1 = 0 - elif x1 + settings.crop_width > im.width: - x1 = im.width - settings.crop_width - - y1 = focus.y - y_half - if y1 < 0: - y1 = 0 - elif y1 + settings.crop_height > im.height: - y1 = im.height - settings.crop_height - - x2 = x1 + settings.crop_width - y2 = y1 + settings.crop_height - - crop = [x1, y1, x2, y2] - - results = [] - - results.append(im.crop(tuple(crop))) - - if settings.annotate_image: - d = ImageDraw.Draw(im_debug) - rect = list(crop) - rect[2] -= 1 - rect[3] -= 1 - d.rectangle(rect, outline=GREEN) - results.append(im_debug) - if settings.destop_view_image: - im_debug.show() - - return results + """ Intelligently crop an image to the subject matter """ + + scale_by = 1 + if is_landscape(im.width, im.height): + scale_by = settings.crop_height / im.height + elif is_portrait(im.width, im.height): + scale_by = settings.crop_width / im.width + elif is_square(im.width, im.height): + if is_square(settings.crop_width, settings.crop_height): + scale_by = settings.crop_width / im.width + elif is_landscape(settings.crop_width, settings.crop_height): + scale_by = settings.crop_width / im.width + elif is_portrait(settings.crop_width, settings.crop_height): + scale_by = settings.crop_height / im.height + + + im = im.resize((int(im.width * scale_by), int(im.height * scale_by))) + im_debug = im.copy() + + focus = 
focal_point(im_debug, settings) + + # take the focal point and turn it into crop coordinates that try to center over the focal + # point but then get adjusted back into the frame + y_half = int(settings.crop_height / 2) + x_half = int(settings.crop_width / 2) + + x1 = focus.x - x_half + if x1 < 0: + x1 = 0 + elif x1 + settings.crop_width > im.width: + x1 = im.width - settings.crop_width + + y1 = focus.y - y_half + if y1 < 0: + y1 = 0 + elif y1 + settings.crop_height > im.height: + y1 = im.height - settings.crop_height + + x2 = x1 + settings.crop_width + y2 = y1 + settings.crop_height + + crop = [x1, y1, x2, y2] + + results = [] + + results.append(im.crop(tuple(crop))) + + if settings.annotate_image: + d = ImageDraw.Draw(im_debug) + rect = list(crop) + rect[2] -= 1 + rect[3] -= 1 + d.rectangle(rect, outline=GREEN) + results.append(im_debug) + if settings.destop_view_image: + im_debug.show() + + return results def focal_point(im, settings): corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else [] @@ -86,7 +87,7 @@ def focal_point(im, settings): corner_centroid = None if len(corner_points) > 0: corner_centroid = centroid(corner_points) - corner_centroid.weight = settings.corner_points_weight / weight_pref_total + corner_centroid.weight = settings.corner_points_weight / weight_pref_total pois.append(corner_centroid) entropy_centroid = None @@ -98,7 +99,7 @@ def focal_point(im, settings): face_centroid = None if len(face_points) > 0: face_centroid = centroid(face_points) - face_centroid.weight = settings.face_points_weight / weight_pref_total + face_centroid.weight = settings.face_points_weight / weight_pref_total pois.append(face_centroid) average_point = poi_average(pois, settings) @@ -132,7 +133,7 @@ def focal_point(im, settings): d.rectangle(f.bounding(4), outline=color) d.ellipse(average_point.bounding(max_size), outline=GREEN) - + return average_point @@ -260,10 +261,11 @@ def image_entropy(im): hist = hist[hist > 0] return -np.log2(hist / hist.sum()).sum() + def centroid(pois): - x = [poi.x for poi in pois] - y = [poi.y for poi in pois] - return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois)) + x = [poi.x for poi in pois] + y = [poi.y for poi in pois] + return PointOfInterest(sum(x) / len(pois), sum(y) / len(pois)) def poi_average(pois, settings): @@ -281,59 +283,59 @@ def poi_average(pois, settings): def is_landscape(w, h): - return w > h + return w > h def is_portrait(w, h): - return h > w + return h > w def is_square(w, h): - return w == h + return w == h def download_and_cache_models(dirname): - download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true' - model_file_name = 'face_detection_yunet.onnx' + download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true' + model_file_name = 'face_detection_yunet.onnx' - if not os.path.exists(dirname): - os.makedirs(dirname) + if not os.path.exists(dirname): + os.makedirs(dirname) - cache_file = os.path.join(dirname, model_file_name) - if not os.path.exists(cache_file): - print(f"downloading face detection model from '{download_url}' to '{cache_file}'") - response = requests.get(download_url) - with open(cache_file, "wb") as f: - f.write(response.content) + cache_file = os.path.join(dirname, model_file_name) + if not os.path.exists(cache_file): + print(f"downloading face detection 
model from '{download_url}' to '{cache_file}'") + response = requests.get(download_url) + with open(cache_file, "wb") as f: + f.write(response.content) - if os.path.exists(cache_file): - return cache_file - return None + if os.path.exists(cache_file): + return cache_file + return None class PointOfInterest: - def __init__(self, x, y, weight=1.0, size=10): - self.x = x - self.y = y - self.weight = weight - self.size = size + def __init__(self, x, y, weight=1.0, size=10): + self.x = x + self.y = y + self.weight = weight + self.size = size - def bounding(self, size): - return [ - self.x - size//2, - self.y - size//2, - self.x + size//2, - self.y + size//2 - ] + def bounding(self, size): + return [ + self.x - size // 2, + self.y - size // 2, + self.x + size // 2, + self.y + size // 2 + ] class Settings: - def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False, dnn_model_path=None): - self.crop_width = crop_width - self.crop_height = crop_height - self.corner_points_weight = corner_points_weight - self.entropy_points_weight = entropy_points_weight - self.face_points_weight = face_points_weight - self.annotate_image = annotate_image - self.destop_view_image = False - self.dnn_model_path = dnn_model_path + def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False, dnn_model_path=None): + self.crop_width = crop_width + self.crop_height = crop_height + self.corner_points_weight = corner_points_weight + self.entropy_points_weight = entropy_points_weight + self.face_points_weight = face_points_weight + self.annotate_image = annotate_image + self.destop_view_image = False + self.dnn_model_path = dnn_model_path -- cgit v1.2.1 From cb3f8ff59fe8f142c3ca074b8cbaaf83357f9dc1 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Thu, 11 May 2023 15:55:43 +0000 Subject: Fix symlink scanning --- modules/shared.py | 2 +- modules/ui_extra_networks.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index f387b5ae..210424ac 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -741,7 +741,7 @@ def walk_files(path, allowed_extensions=None): if allowed_extensions is not None: allowed_extensions = set(allowed_extensions) - for root, _, files in os.walk(path): + for root, _, files in os.walk(path, followlinks=True): for filename in files: if allowed_extensions is not None: _, ext = os.path.splitext(filename) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 2fd82e8e..e35d0bfe 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -90,7 +90,7 @@ class ExtraNetworksPage: subdirs = {} for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]: - for root, dirs, _ in os.walk(parentdir): + for root, dirs, _ in os.walk(parentdir, followlinks=True): for dirname in dirs: x = os.path.join(root, dirname) -- cgit v1.2.1 From 49a55b410b66b7dd9be9335d8a2e3a71e4f8b15c Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 18:28:15 +0300 Subject: Autofix Ruff W (not W605) (mostly whitespace) --- modules/api/api.py | 4 +-- modules/api/models.py | 2 +- modules/cmd_args.py | 2 +- modules/codeformer/codeformer_arch.py | 14 +++++----- modules/codeformer/vqgan_arch.py | 38 +++++++++++++------------- modules/esrgan_model_arch.py | 4 +-- 
modules/extras.py | 2 +- modules/hypernetworks/hypernetwork.py | 12 ++++---- modules/images.py | 2 +- modules/mac_specific.py | 4 +-- modules/masking.py | 2 +- modules/ngrok.py | 4 +-- modules/processing.py | 2 +- modules/script_callbacks.py | 14 +++++----- modules/sd_hijack.py | 12 ++++---- modules/sd_hijack_optimizations.py | 32 +++++++++++----------- modules/sd_models.py | 4 +-- modules/sd_samplers_kdiffusion.py | 18 ++++++------ modules/sub_quadratic_attention.py | 2 +- modules/textual_inversion/dataset.py | 4 +-- modules/textual_inversion/preprocess.py | 2 +- modules/textual_inversion/textual_inversion.py | 16 +++++------ modules/ui.py | 18 ++++++------ modules/ui_extensions.py | 6 ++-- modules/xlmr.py | 6 ++-- 25 files changed, 113 insertions(+), 113 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 594fa655..165985c3 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -227,7 +227,7 @@ class Api: script_idx = script_name_to_index(script_name, script_runner.selectable_scripts) script = script_runner.selectable_scripts[script_idx] return script, script_idx - + def get_scripts_list(self): t2ilist = [str(title.lower()) for title in scripts.scripts_txt2img.titles] i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles] @@ -237,7 +237,7 @@ class Api: def get_script(self, script_name, script_runner): if script_name is None or script_name == "": return None, None - + script_idx = script_name_to_index(script_name, script_runner.scripts) return script_runner.scripts[script_idx] diff --git a/modules/api/models.py b/modules/api/models.py index 4d291076..006ccdb7 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -289,4 +289,4 @@ class MemoryResponse(BaseModel): class ScriptsList(BaseModel): txt2img: list = Field(default=None,title="Txt2img", description="Titles of scripts (txt2img)") - img2img: list = Field(default=None,title="Img2img", description="Titles of scripts (img2img)") \ No newline at end of file + img2img: list = Field(default=None,title="Img2img", description="Titles of scripts (img2img)") diff --git a/modules/cmd_args.py b/modules/cmd_args.py index e01ca655..f4a4ab36 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -102,4 +102,4 @@ parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gra parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) -parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') \ No newline at end of file +parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py index 45c70f84..12db6814 100644 --- a/modules/codeformer/codeformer_arch.py +++ b/modules/codeformer/codeformer_arch.py @@ -119,7 +119,7 @@ class TransformerSALayer(nn.Module): tgt_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None): - + # self attention tgt2 = self.norm1(tgt) q = k = self.with_pos_embed(tgt2, query_pos) @@ -159,7 +159,7 @@ class Fuse_sft_block(nn.Module): 
@ARCH_REGISTRY.register() class CodeFormer(VQAutoEncoder): - def __init__(self, dim_embd=512, n_head=8, n_layers=9, + def __init__(self, dim_embd=512, n_head=8, n_layers=9, codebook_size=1024, latent_size=256, connect_list=('32', '64', '128', '256'), fix_modules=('quantize', 'generator')): @@ -179,14 +179,14 @@ class CodeFormer(VQAutoEncoder): self.feat_emb = nn.Linear(256, self.dim_embd) # transformer - self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0) + self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0) for _ in range(self.n_layers)]) # logits_predict head self.idx_pred_layer = nn.Sequential( nn.LayerNorm(dim_embd), nn.Linear(dim_embd, codebook_size, bias=False)) - + self.channels = { '16': 512, '32': 256, @@ -221,7 +221,7 @@ class CodeFormer(VQAutoEncoder): enc_feat_dict = {} out_list = [self.fuse_encoder_block[f_size] for f_size in self.connect_list] for i, block in enumerate(self.encoder.blocks): - x = block(x) + x = block(x) if i in out_list: enc_feat_dict[str(x.shape[-1])] = x.clone() @@ -266,11 +266,11 @@ class CodeFormer(VQAutoEncoder): fuse_list = [self.fuse_generator_block[f_size] for f_size in self.connect_list] for i, block in enumerate(self.generator.blocks): - x = block(x) + x = block(x) if i in fuse_list: # fuse after i-th block f_size = str(x.shape[-1]) if w>0: x = self.fuse_convs_dict[f_size](enc_feat_dict[f_size].detach(), x, w) out = x # logits doesn't need softmax before cross_entropy loss - return out, logits, lq_feat \ No newline at end of file + return out, logits, lq_feat diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py index b24a0394..09ee6660 100644 --- a/modules/codeformer/vqgan_arch.py +++ b/modules/codeformer/vqgan_arch.py @@ -13,7 +13,7 @@ from basicsr.utils.registry import ARCH_REGISTRY def normalize(in_channels): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - + @torch.jit.script def swish(x): @@ -210,15 +210,15 @@ class AttnBlock(nn.Module): # compute attention b, c, h, w = q.shape q = q.reshape(b, c, h*w) - q = q.permute(0, 2, 1) + q = q.permute(0, 2, 1) k = k.reshape(b, c, h*w) - w_ = torch.bmm(q, k) + w_ = torch.bmm(q, k) w_ = w_ * (int(c)**(-0.5)) w_ = F.softmax(w_, dim=2) # attend to values v = v.reshape(b, c, h*w) - w_ = w_.permute(0, 2, 1) + w_ = w_.permute(0, 2, 1) h_ = torch.bmm(v, w_) h_ = h_.reshape(b, c, h, w) @@ -270,18 +270,18 @@ class Encoder(nn.Module): def forward(self, x): for block in self.blocks: x = block(x) - + return x class Generator(nn.Module): def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions): super().__init__() - self.nf = nf - self.ch_mult = ch_mult + self.nf = nf + self.ch_mult = ch_mult self.num_resolutions = len(self.ch_mult) self.num_res_blocks = res_blocks - self.resolution = img_size + self.resolution = img_size self.attn_resolutions = attn_resolutions self.in_channels = emb_dim self.out_channels = 3 @@ -315,24 +315,24 @@ class Generator(nn.Module): blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1)) self.blocks = nn.ModuleList(blocks) - + def forward(self, x): for block in self.blocks: x = block(x) - + return x - + @ARCH_REGISTRY.register() class VQAutoEncoder(nn.Module): def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=None, codebook_size=1024, emb_dim=256, beta=0.25, 
gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None): super().__init__() logger = get_root_logger() - self.in_channels = 3 - self.nf = nf - self.n_blocks = res_blocks + self.in_channels = 3 + self.nf = nf + self.n_blocks = res_blocks self.codebook_size = codebook_size self.embed_dim = emb_dim self.ch_mult = ch_mult @@ -363,11 +363,11 @@ class VQAutoEncoder(nn.Module): self.kl_weight ) self.generator = Generator( - self.nf, + self.nf, self.embed_dim, - self.ch_mult, - self.n_blocks, - self.resolution, + self.ch_mult, + self.n_blocks, + self.resolution, self.attn_resolutions ) @@ -432,4 +432,4 @@ class VQGANDiscriminator(nn.Module): raise ValueError('Wrong params!') def forward(self, x): - return self.main(x) \ No newline at end of file + return self.main(x) diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py index 4de9dd8d..2b9888ba 100644 --- a/modules/esrgan_model_arch.py +++ b/modules/esrgan_model_arch.py @@ -105,7 +105,7 @@ class ResidualDenseBlock_5C(nn.Module): Modified options that can be used: - "Partial Convolution based Padding" arXiv:1811.11718 - "Spectral normalization" arXiv:1802.05957 - - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C. + - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C. {Rakotonirina} and A. {Rasoanaivo} """ @@ -170,7 +170,7 @@ class GaussianNoise(nn.Module): scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x sampled_noise = self.noise.repeat(*x.size()).normal_() * scale x = x + sampled_noise - return x + return x def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) diff --git a/modules/extras.py b/modules/extras.py index eb4f0b42..bdf9b3b7 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -199,7 +199,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ result_is_inpainting_model = True else: theta_0[key] = theta_func2(a, b, multiplier) - + theta_0[key] = to_half(theta_0[key], save_as_half) shared.state.sampling_step += 1 diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 38ef074f..570b5603 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -540,7 +540,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi return hypernetwork, filename scheduler = LearnRateScheduler(learn_rate, steps, initial_step) - + clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else None if clip_grad: clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False) @@ -593,7 +593,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi print(e) scaler = torch.cuda.amp.GradScaler() - + batch_size = ds.batch_size gradient_step = ds.gradient_step # n steps = batch_size * gradient_step * n image processed @@ -636,7 +636,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi if clip_grad: clip_grad_sched.step(hypernetwork.step) - + with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) if use_weight: @@ -657,14 +657,14 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi _loss_step += loss.item() scaler.scale(loss).backward() - + # go back until we reach gradient accumulation steps if (j + 1) % gradient_step != 0: continue 
loss_logging.append(_loss_step) if clip_grad: clip_grad(weights, clip_grad_sched.learn_rate) - + scaler.step(optimizer) scaler.update() hypernetwork.step += 1 @@ -674,7 +674,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi _loss_step = 0 steps_done = hypernetwork.step + 1 - + epoch_num = hypernetwork.step // steps_per_epoch epoch_step = hypernetwork.step % steps_per_epoch diff --git a/modules/images.py b/modules/images.py index 3b8b62d9..b2de3662 100644 --- a/modules/images.py +++ b/modules/images.py @@ -367,7 +367,7 @@ class FilenameGenerator: self.seed = seed self.prompt = prompt self.image = image - + def hasprompt(self, *args): lower = self.prompt.lower() if self.p is None or self.prompt is None: diff --git a/modules/mac_specific.py b/modules/mac_specific.py index 5c2f92a1..d74c6b95 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -42,7 +42,7 @@ if has_mps: # MPS workaround for https://github.com/pytorch/pytorch/issues/79383 CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs), lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps')) - # MPS workaround for https://github.com/pytorch/pytorch/issues/80800 + # MPS workaround for https://github.com/pytorch/pytorch/issues/80800 CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs), lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps') # MPS workaround for https://github.com/pytorch/pytorch/issues/90532 @@ -60,4 +60,4 @@ if has_mps: # MPS workaround for https://github.com/pytorch/pytorch/issues/92311 if platform.processor() == 'i386': for funcName in ['torch.argmax', 'torch.Tensor.argmax']: - CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps') \ No newline at end of file + CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps') diff --git a/modules/masking.py b/modules/masking.py index a5c4d2da..be9f84c7 100644 --- a/modules/masking.py +++ b/modules/masking.py @@ -4,7 +4,7 @@ from PIL import Image, ImageFilter, ImageOps def get_crop_region(mask, pad=0): """finds a rectangular region that contains all masked ares in an image. Returns (x1, y1, x2, y2) coordinates of the rectangle. For example, if a user has painted the top-right part of a 512x512 image", the result may be (256, 0, 512, 256)""" - + h, w = mask.shape crop_left = 0 diff --git a/modules/ngrok.py b/modules/ngrok.py index 7a7b4b26..67a74e85 100644 --- a/modules/ngrok.py +++ b/modules/ngrok.py @@ -13,7 +13,7 @@ def connect(token, port, region): config = conf.PyngrokConfig( auth_token=token, region=region ) - + # Guard for existing tunnels existing = ngrok.get_tunnels(pyngrok_config=config) if existing: @@ -24,7 +24,7 @@ def connect(token, port, region): print(f'ngrok has already been connected to localhost:{port}! 
URL: {public_url}\n' 'You can use this link after the launch is complete.') return - + try: if account is None: public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True).public_url diff --git a/modules/processing.py b/modules/processing.py index c3932d6b..f902b9df 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -164,7 +164,7 @@ class StableDiffusionProcessing: self.all_subseeds = None self.iteration = 0 self.is_hr_pass = False - + @property def sd_model(self): diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 17109732..7d9dd736 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -32,22 +32,22 @@ class CFGDenoiserParams: def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps, text_cond, text_uncond): self.x = x """Latent image representation in the process of being denoised""" - + self.image_cond = image_cond """Conditioning image""" - + self.sigma = sigma """Current sigma noise step value""" - + self.sampling_step = sampling_step """Current Sampling step number""" - + self.total_sampling_steps = total_sampling_steps """Total number of sampling steps planned""" - + self.text_cond = text_cond """ Encoder hidden states of text conditioning from prompt""" - + self.text_uncond = text_uncond """ Encoder hidden states of text conditioning from negative prompt""" @@ -240,7 +240,7 @@ def add_callback(callbacks, fun): callbacks.append(ScriptCallback(filename, fun)) - + def remove_current_script_callbacks(): stack = [x for x in inspect.stack() if x.filename != __file__] filename = stack[0].filename if len(stack) > 0 else 'unknown file' diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index e374aeb8..7e50f1ab 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -34,7 +34,7 @@ def apply_optimizations(): ldm.modules.diffusionmodules.model.nonlinearity = silu ldm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th - + optimization_method = None can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) # not everyone has torch 2.x to use sdp @@ -92,12 +92,12 @@ def fix_checkpoint(): def weighted_loss(sd_model, pred, target, mean=True): #Calculate the weight normally, but ignore the mean loss = sd_model._old_get_loss(pred, target, mean=False) - + #Check if we have weights available weight = getattr(sd_model, '_custom_loss_weight', None) if weight is not None: loss *= weight - + #Return the loss, as mean if specified return loss.mean() if mean else loss @@ -105,7 +105,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs): try: #Temporarily append weights to a place accessible during loss calc sd_model._custom_loss_weight = w - + #Replace 'get_loss' with a weight-aware one. 
Otherwise we need to reimplement 'forward' completely #Keep 'get_loss', but don't overwrite the previous old_get_loss if it's already set if not hasattr(sd_model, '_old_get_loss'): @@ -120,7 +120,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs): del sd_model._custom_loss_weight except AttributeError: pass - + #If we have an old loss function, reset the loss function to the original one if hasattr(sd_model, '_old_get_loss'): sd_model.get_loss = sd_model._old_get_loss @@ -184,7 +184,7 @@ class StableDiffusionModelHijack: def undo_hijack(self, m): if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation: - m.cond_stage_model = m.cond_stage_model.wrapped + m.cond_stage_model = m.cond_stage_model.wrapped elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords: m.cond_stage_model = m.cond_stage_model.wrapped diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index a174bbe1..f00fe55c 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -62,10 +62,10 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None): end = i + 2 s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end]) s1 *= self.scale - + s2 = s1.softmax(dim=-1) del s1 - + r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end]) del s2 del q, k, v @@ -95,43 +95,43 @@ def split_cross_attention_forward(self, x, context=None, mask=None): with devices.without_autocast(disable=not shared.opts.upcast_attn): k_in = k_in * self.scale - + del context, x - + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in - + r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - + mem_free_total = get_available_vram() - + gb = 1024 ** 3 tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() modifier = 3 if q.element_size() == 2 else 2.5 mem_required = tensor_size * modifier steps = 1 - + if mem_required > mem_free_total: steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2))) # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB " # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}") - + if steps > 64: max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64 raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). 
' f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free') - + slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] for i in range(0, q.shape[1], slice_size): end = i + slice_size s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) - + s2 = s1.softmax(dim=-1, dtype=q.dtype) del s1 - + r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) del s2 - + del q, k, v r1 = r1.to(dtype) @@ -228,7 +228,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None): with devices.without_autocast(disable=not shared.opts.upcast_attn): k = k * self.scale - + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) r = einsum_op(q, k, v) r = r.to(dtype) @@ -369,7 +369,7 @@ def scaled_dot_product_attention_forward(self, x, context=None, mask=None): q = q_in.view(batch_size, -1, h, head_dim).transpose(1, 2) k = k_in.view(batch_size, -1, h, head_dim).transpose(1, 2) v = v_in.view(batch_size, -1, h, head_dim).transpose(1, 2) - + del q_in, k_in, v_in dtype = q.dtype @@ -451,7 +451,7 @@ def cross_attention_attnblock_forward(self, x): h3 += x return h3 - + def xformers_attnblock_forward(self, x): try: h_ = x diff --git a/modules/sd_models.py b/modules/sd_models.py index d1e946a5..3316d021 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -165,7 +165,7 @@ def model_hash(filename): def select_checkpoint(): model_checkpoint = shared.opts.sd_model_checkpoint - + checkpoint_info = checkpoint_alisases.get(model_checkpoint, None) if checkpoint_info is not None: return checkpoint_info @@ -372,7 +372,7 @@ def enable_midas_autodownload(): if not os.path.exists(path): if not os.path.exists(midas_path): mkdir(midas_path) - + print(f"Downloading midas model weights for {model_type} to {path}") request.urlretrieve(midas_urls[model_type], path) print(f"{model_type} downloaded") diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 2f733cf5..e9e41818 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -93,10 +93,10 @@ class CFGDenoiser(torch.nn.Module): if shared.sd_model.model.conditioning_key == "crossattn-adm": image_uncond = torch.zeros_like(image_cond) - make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm} + make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm} else: image_uncond = image_cond - make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]} + make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]} if not is_edit_model: x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) @@ -316,7 +316,7 @@ class KDiffusionSampler: sigma_sched = sigmas[steps - t_enc - 1:] xi = x + noise * sigma_sched[0] - + extra_params_kwargs = self.initialize(p) parameters = inspect.signature(self.func).parameters @@ -339,9 +339,9 @@ class KDiffusionSampler: self.model_wrap_cfg.init_latent = x self.last_latent = x extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale, 's_min_uncond': self.s_min_uncond } @@ -374,9 +374,9 @@ class KDiffusionSampler: self.last_latent = x samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ - 'cond': 
conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, + 'cond': conditioning, + 'image_cond': image_conditioning, + 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale, 's_min_uncond': self.s_min_uncond }, disable=False, callback=self.callback_state, **extra_params_kwargs)) diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py index cc38debd..497568eb 100644 --- a/modules/sub_quadratic_attention.py +++ b/modules/sub_quadratic_attention.py @@ -179,7 +179,7 @@ def efficient_dot_product_attention( chunk_idx, min(query_chunk_size, q_tokens) ) - + summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale) summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk compute_query_chunk_attn: ComputeQueryChunkAttn = partial( diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 41610e03..b9621fc9 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -118,7 +118,7 @@ class PersonalizedBase(Dataset): weight = torch.ones(latent_sample.shape) else: weight = None - + if latent_sampling_method == "random": entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight) else: @@ -243,4 +243,4 @@ class BatchLoaderRandom(BatchLoader): return self def collate_wrapper_random(batch): - return BatchLoaderRandom(batch) \ No newline at end of file + return BatchLoaderRandom(batch) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index d0cad09e..a009d8e8 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -125,7 +125,7 @@ def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, thr default=None ) return wh and center_crop(image, *wh) - + def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_keep_original_size, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None): width = process_width diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 9e1b2b9a..d489ed1e 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -323,16 +323,16 @@ def tensorboard_add(tensorboard_writer, loss, global_step, step, learn_rate, epo tensorboard_add_scaler(tensorboard_writer, f"Learn rate/train/epoch-{epoch_num}", learn_rate, step) def tensorboard_add_scaler(tensorboard_writer, tag, value, step): - tensorboard_writer.add_scalar(tag=tag, + tensorboard_writer.add_scalar(tag=tag, scalar_value=value, global_step=step) def tensorboard_add_image(tensorboard_writer, tag, pil_image, step): # Convert a pil image to a torch tensor img_tensor = torch.as_tensor(np.array(pil_image, copy=True)) - img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], + img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], len(pil_image.getbands())) 
img_tensor = img_tensor.permute((2, 0, 1)) - + tensorboard_writer.add_image(tag, img_tensor, global_step=step) def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_model_every, create_image_every, log_directory, name="embedding"): @@ -402,7 +402,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st if initial_step >= steps: shared.state.textinfo = "Model has already been trained beyond specified max steps" return embedding, filename - + scheduler = LearnRateScheduler(learn_rate, steps, initial_step) clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \ torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \ @@ -412,7 +412,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." old_parallel_processing_allowed = shared.parallel_processing_allowed - + if shared.opts.training_enable_tensorboard: tensorboard_writer = tensorboard_setup(log_directory) @@ -439,7 +439,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st optimizer_saved_dict = torch.load(f"{filename}.optim", map_location='cpu') if embedding.checksum() == optimizer_saved_dict.get('hash', None): optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None) - + if optimizer_state_dict is not None: optimizer.load_state_dict(optimizer_state_dict) print("Loaded existing optimizer from checkpoint") @@ -485,7 +485,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st if clip_grad: clip_grad_sched.step(embedding.step) - + with devices.autocast(): x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) if use_weight: @@ -513,7 +513,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st # go back until we reach gradient accumulation steps if (j + 1) % gradient_step != 0: continue - + if clip_grad: clip_grad(embedding.vec, clip_grad_sched.learn_rate) diff --git a/modules/ui.py b/modules/ui.py index 1efb656a..ff82fff6 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1171,7 +1171,7 @@ def create_ui(): process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") - + with gr.Column(visible=False) as process_multicrop_col: gr.Markdown('Each image is center-cropped with an automatically chosen width and height.') with gr.Row(): @@ -1183,7 +1183,7 @@ def create_ui(): with gr.Row(): process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective") process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold") - + with gr.Row(): with gr.Column(scale=3): gr.HTML(value="") @@ -1226,7 +1226,7 @@ def create_ui(): with FormRow(): embedding_learn_rate = gr.Textbox(label='Embedding Learning 
rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - + with FormRow(): clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) @@ -1565,7 +1565,7 @@ def create_ui(): gr.HTML(shared.html("licenses.html"), elem_id="licenses") gr.Button(value="Show all pages", elem_id="settings_show_all_pages") - + def unload_sd_weights(): modules.sd_models.unload_model_weights() @@ -1841,15 +1841,15 @@ def versions_html(): return f""" version: {tag} - •  + • python: {python_version} - •  + • torch: {getattr(torch, '__long_version__',torch.__version__)} - •  + • xformers: {xformers_version} - •  + • gradio: {gr.__version__} - •  + • checkpoint: N/A """ diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index ed70abe5..af497733 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -467,7 +467,7 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=" - + """ for tag in [x for x in extension_tags if x not in tags]: @@ -535,9 +535,9 @@ def create_ui(): hide_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Hide extensions with tags", choices=["script", "ads", "localization", "installed"]) sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order", ], type="index") - with gr.Row(): + with gr.Row(): search_extensions_text = gr.Text(label="Search").style(container=False) - + install_result = gr.HTML() available_extensions_table = gr.HTML() diff --git a/modules/xlmr.py b/modules/xlmr.py index e056c3f6..a407a3ca 100644 --- a/modules/xlmr.py +++ b/modules/xlmr.py @@ -28,7 +28,7 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): config_class = BertSeriesConfig def __init__(self, config=None, **kargs): - # modify initialization for autoloading + # modify initialization for autoloading if config is None: config = XLMRobertaConfig() config.attention_probs_dropout_prob= 0.1 @@ -74,7 +74,7 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): text["attention_mask"] = torch.tensor( text['attention_mask']).to(device) features = self(**text) - return features['projection_state'] + return features['projection_state'] def forward( self, @@ -134,4 +134,4 @@ class BertSeriesModelWithTransformation(BertPreTrainedModel): class RobertaSeriesModelWithTransformation(BertSeriesModelWithTransformation): base_model_prefix = 'roberta' - config_class= RobertaSeriesConfig \ No newline at end of file + config_class= RobertaSeriesConfig -- cgit v1.2.1 From da10de022f69e7847bcc64a7914d56246d852e20 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 20:52:30 +0300 Subject: Make live previews use JPEG only when the image is lorge enough --- modules/progress.py | 12 ++++++++++-- modules/shared.py | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/progress.py b/modules/progress.py index 289dd311..c2e37834 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -95,9 +95,17 @@ def progressapi(req: ProgressRequest): image = shared.state.current_image if image is not None: buffered = io.BytesIO() - image.save(buffered, 
format=opts.live_previews_format) + format = opts.live_previews_format + save_kwargs = {} + if format == "auto": + if max(*image.size) > 256: + format = "jpeg" + else: + format = "png" + save_kwargs = {"optimize": True} + image.save(buffered, format=format, **save_kwargs) base64_image = base64.b64encode(buffered.getvalue()).decode('ascii') - live_preview = f"data:image/{opts.live_previews_format};base64,{base64_image}" + live_preview = f"data:image/{format};base64,{base64_image}" id_live_preview = shared.state.id_live_preview else: live_preview = None diff --git a/modules/shared.py b/modules/shared.py index f387b5ae..22b45618 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -420,7 +420,7 @@ options_templates.update(options_section(('infotext', "Infotext"), { options_templates.update(options_section(('ui', "Live previews"), { "show_progressbar": OptionInfo(True, "Show progressbar"), "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), - "live_previews_format": OptionInfo("jpeg", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), + "live_previews_format": OptionInfo("auto", "Live preview file format", gr.Radio, {"choices": ["auto", "jpeg", "png", "webp"]}), "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), "show_progress_every_n_steps": OptionInfo(10, "Show new live preview image every N sampling steps. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}), "show_progress_type": OptionInfo("Approx NN", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}), -- cgit v1.2.1 From 681c16dd1e911bdf831b031b0f31aaba41c280f8 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 12 May 2023 22:33:21 +0900 Subject: fix --data-dir for COMMANDLINE_ARGS move reading of COMMANDLINE_ARGS into paths_internal.py so --data-dir can be properly read --- modules/paths_internal.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'modules') diff --git a/modules/paths_internal.py b/modules/paths_internal.py index a23f6d70..005a9b0a 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -2,6 +2,11 @@ import argparse import os +import sys +import shlex + +commandline_args = os.environ.get('COMMANDLINE_ARGS', "") +sys.argv += shlex.split(commandline_args) modules_path = os.path.dirname(os.path.realpath(__file__)) script_path = os.path.dirname(modules_path) -- cgit v1.2.1 From 867be74244dc725fcf2685018b97501e83a16235 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 12 May 2023 18:08:34 +0000 Subject: Define default fonts for Gradio theme Allows web UI to (almost) be ran fully offline. The web UI will hang on load if offline when these fonts are not manually defined, as it will attempt (and fail) to pull from Google Fonts. 
--- modules/shared.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 1df1dd45..b09b384e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -666,13 +666,19 @@ def reload_gradio_theme(theme_name=None): theme_name = opts.gradio_theme if theme_name == "Default": - gradio_theme = gr.themes.Default() + gradio_theme = gr.themes.Default( + font=['Helvetica', 'ui-sans-serif', 'system-ui', 'sans-serif'], + font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], + ) else: try: gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) except Exception as e: errors.display(e, "changing gradio theme") - gradio_theme = gr.themes.Default() + gradio_theme = gr.themes.Default( + font=['Helvetica', 'ui-sans-serif', 'system-ui', 'sans-serif'], + font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], + ) -- cgit v1.2.1 From 5afc44aab14819afea87b2f36c2f76dc43d3e83c Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 13 May 2023 12:57:32 +0000 Subject: Requested changes --- modules/shared.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index b09b384e..96a20a6b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -665,20 +665,19 @@ def reload_gradio_theme(theme_name=None): if not theme_name: theme_name = opts.gradio_theme + default_theme_args = dict( + font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], + font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], + ) + if theme_name == "Default": - gradio_theme = gr.themes.Default( - font=['Helvetica', 'ui-sans-serif', 'system-ui', 'sans-serif'], - font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], - ) + gradio_theme = gr.themes.Default(**default_theme_args) else: try: gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) except Exception as e: errors.display(e, "changing gradio theme") - gradio_theme = gr.themes.Default( - font=['Helvetica', 'ui-sans-serif', 'system-ui', 'sans-serif'], - font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], - ) + gradio_theme = gr.themes.Default(**default_theme_args) -- cgit v1.2.1 From 55e52c878ab669d5b11b001a4152ee1a3b8d4880 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Sat, 13 May 2023 09:24:56 -0500 Subject: remove command line option --- modules/cmd_args.py | 1 - modules/processing.py | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 46043e33..f4a4ab36 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -102,5 +102,4 @@ parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gra parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) -parser.add_argument("--token-merging", action='store_true', help="Provides speed and memory improvements by merging redundant tokens. 
This has a more pronounced effect on higher resolutions.", default=False) parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') diff --git a/modules/processing.py b/modules/processing.py index 8ba3a96b..6828e898 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -496,8 +496,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, - "Token merging ratio": None if not (opts.token_merging or cmd_opts.token_merging) or opts.token_merging_hr_only else opts.token_merging_ratio, - "Token merging ratio hr": None if not (opts.token_merging or cmd_opts.token_merging) else opts.token_merging_ratio_hr, + "Token merging ratio": None if not opts.token_merging or opts.token_merging_hr_only else opts.token_merging_ratio, + "Token merging ratio hr": None if not opts.token_merging else opts.token_merging_ratio_hr, "Token merging random": None if opts.token_merging_random is False else opts.token_merging_random, "Token merging merge attention": None if opts.token_merging_merge_attention is True else opts.token_merging_merge_attention, "Token merging merge cross attention": None if opts.token_merging_merge_cross_attention is False else opts.token_merging_merge_cross_attention, @@ -538,7 +538,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if (opts.token_merging or cmd_opts.token_merging) and not opts.token_merging_hr_only: + if opts.token_merging and not opts.token_merging_hr_only: sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) logger.debug('Token merging applied') @@ -546,7 +546,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: finally: # undo model optimizations made by tomesd - if opts.token_merging or cmd_opts.token_merging: + if opts.token_merging: tomesd.remove_patch(p.sd_model) logger.debug('Token merging model optimizations removed') @@ -1004,7 +1004,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): # apply token merging optimizations from tomesd for high-res pass # check if hr_only so we are not redundantly patching - if (cmd_opts.token_merging or opts.token_merging) and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): + if opts.token_merging and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): # case where user wants to use separate merge ratios if not opts.token_merging_hr_only: # clean patch done by first pass. (clobbering the first patch might be fine? 
this might be excessive) -- cgit v1.2.1 From cb5f61281a95be72fc812b7d350b6ec23e2f9bdd Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sat, 13 May 2023 11:04:26 -0400 Subject: Allow bf16 in safe unpickler --- modules/safe.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/safe.py b/modules/safe.py index 1e791c5b..e8f50774 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -40,7 +40,7 @@ class RestrictedUnpickler(pickle.Unpickler): return getattr(collections, name) if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter', '_rebuild_device_tensor_from_numpy']: return getattr(torch._utils, name) - if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage', 'float32']: + if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage', 'float32', 'BFloat16Storage']: return getattr(torch, name) if module == 'torch.nn.modules.container' and name in ['ParameterDict']: return getattr(torch.nn.modules.container, name) -- cgit v1.2.1 From ac83627a31daac06f4d48b0e7db223ef807fe8e5 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Sat, 13 May 2023 10:23:42 -0500 Subject: heavily simplify --- modules/generation_parameters_copypaste.py | 36 ------------------------- modules/processing.py | 35 +++++++++++-------------- modules/sd_models.py | 11 +++----- modules/shared.py | 42 +++--------------------------- 4 files changed, 23 insertions(+), 101 deletions(-) (limited to 'modules') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fb56254f..a0a98bbc 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -282,33 +282,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model res["Hires resize-1"] = 0 res["Hires resize-2"] = 0 - # Infer additional override settings for token merging - token_merging_ratio = res.get("Token merging ratio", None) - token_merging_ratio_hr = res.get("Token merging ratio hr", None) - - if token_merging_ratio is not None or token_merging_ratio_hr is not None: - res["Token merging"] = 'True' - - if token_merging_ratio is None: - res["Token merging hr only"] = 'True' - else: - res["Token merging hr only"] = 'False' - - if res.get("Token merging random", None) is None: - res["Token merging random"] = 'False' - if res.get("Token merging merge attention", None) is None: - res["Token merging merge attention"] = 'True' - if res.get("Token merging merge cross attention", None) is None: - res["Token merging merge cross attention"] = 'False' - if res.get("Token merging merge mlp", None) is None: - res["Token merging merge mlp"] = 'False' - if res.get("Token merging stride x", None) is None: - res["Token merging stride x"] = '2' - if res.get("Token merging stride y", None) is None: - res["Token merging stride y"] = '2' - if res.get("Token merging maximum down sampling", None) is None: - res["Token merging maximum down sampling"] = '1' - restore_old_hires_fix_params(res) # Missing RNG means the default was set, which is GPU RNG @@ -335,17 +308,8 @@ infotext_to_setting_name_mapping = [ ('UniPC skip type', 'uni_pc_skip_type'), ('UniPC order', 'uni_pc_order'), ('UniPC lower order final', 'uni_pc_lower_order_final'), - ('Token merging', 'token_merging'), ('Token merging 
ratio', 'token_merging_ratio'), - ('Token merging hr only', 'token_merging_hr_only'), ('Token merging ratio hr', 'token_merging_ratio_hr'), - ('Token merging random', 'token_merging_random'), - ('Token merging merge attention', 'token_merging_merge_attention'), - ('Token merging merge cross attention', 'token_merging_merge_cross_attention'), - ('Token merging merge mlp', 'token_merging_merge_mlp'), - ('Token merging maximum down sampling', 'token_merging_maximum_down_sampling'), - ('Token merging stride x', 'token_merging_stride_x'), - ('Token merging stride y', 'token_merging_stride_y'), ('RNG', 'randn_source'), ('NGMS', 's_min_uncond') ] diff --git a/modules/processing.py b/modules/processing.py index 6828e898..32ff61e9 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -34,7 +34,7 @@ import tomesd # add a logger for the processing module logger = logging.getLogger(__name__) # manually set output level here since there is no option to do so yet through launch options -# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') +logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') # some of those options should not be changed at all because they would break the model, so I removed them from options. @@ -496,15 +496,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, - "Token merging ratio": None if not opts.token_merging or opts.token_merging_hr_only else opts.token_merging_ratio, - "Token merging ratio hr": None if not opts.token_merging else opts.token_merging_ratio_hr, - "Token merging random": None if opts.token_merging_random is False else opts.token_merging_random, - "Token merging merge attention": None if opts.token_merging_merge_attention is True else opts.token_merging_merge_attention, - "Token merging merge cross attention": None if opts.token_merging_merge_cross_attention is False else opts.token_merging_merge_cross_attention, - "Token merging merge mlp": None if opts.token_merging_merge_mlp is False else opts.token_merging_merge_mlp, - "Token merging stride x": None if opts.token_merging_stride_x == 2 else opts.token_merging_stride_x, - "Token merging stride y": None if opts.token_merging_stride_y == 2 else opts.token_merging_stride_y, - "Token merging maximum down sampling": None if opts.token_merging_maximum_down_sampling == 1 else opts.token_merging_maximum_down_sampling, + "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio, + "Token merging ratio hr": None if not p.enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, @@ -538,15 +531,15 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if opts.token_merging and not opts.token_merging_hr_only: + if opts.token_merging_ratio > 0: sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) - logger.debug('Token merging applied') + logger.debug(f"Token merging applied to first pass. 
Ratio: '{opts.token_merging_ratio}'") res = process_images_inner(p) finally: # undo model optimizations made by tomesd - if opts.token_merging: + if opts.token_merging_ratio > 0: tomesd.remove_patch(p.sd_model) logger.debug('Token merging model optimizations removed') @@ -1003,19 +996,21 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): devices.torch_gc() # apply token merging optimizations from tomesd for high-res pass - # check if hr_only so we are not redundantly patching - if opts.token_merging and (opts.token_merging_hr_only or opts.token_merging_ratio_hr != opts.token_merging_ratio): - # case where user wants to use separate merge ratios - if not opts.token_merging_hr_only: - # clean patch done by first pass. (clobbering the first patch might be fine? this might be excessive) + if opts.token_merging_ratio_hr > 0: + # in case the user has used separate merge ratios + if opts.token_merging_ratio > 0: tomesd.remove_patch(self.sd_model) - logger.debug('Temporarily removed token merging optimizations in preparation for next pass') + logger.debug('Adjusting token merging ratio for high-res pass') sd_models.apply_token_merging(sd_model=self.sd_model, hr=True) - logger.debug('Applied token merging for high-res pass') + logger.debug(f"Applied token merging for high-res pass. Ratio: '{opts.token_merging_ratio_hr}'") samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) + if opts.token_merging_ratio_hr > 0 or opts.token_merging_ratio > 0: + tomesd.remove_patch(self.sd_model) + logger.debug('Removed token merging optimizations from model') + self.is_hr_pass = False return samples diff --git a/modules/sd_models.py b/modules/sd_models.py index 4787193c..4c9a0a1f 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -596,11 +596,8 @@ def apply_token_merging(sd_model, hr: bool): tomesd.apply_patch( sd_model, ratio=ratio, - max_downsample=shared.opts.token_merging_maximum_down_sampling, - sx=shared.opts.token_merging_stride_x, - sy=shared.opts.token_merging_stride_y, - use_rand=shared.opts.token_merging_random, - merge_attn=shared.opts.token_merging_merge_attention, - merge_crossattn=shared.opts.token_merging_merge_cross_attention, - merge_mlp=shared.opts.token_merging_merge_mlp + use_rand=False, # can cause issues with some samplers + merge_attn=True, + merge_crossattn=False, + merge_mlp=False ) diff --git a/modules/shared.py b/modules/shared.py index 4b346585..0d96c14c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -459,47 +459,13 @@ options_templates.update(options_section((None, "Hidden options"), { })) options_templates.update(options_section(('token_merging', 'Token Merging'), { - "token_merging": OptionInfo( - False, "Enable redundant token merging via tomesd. This can provide significant speed and memory improvements.", - gr.Checkbox - ), - "token_merging_ratio": OptionInfo( - 0.5, "Merging Ratio", - gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} - ), - "token_merging_hr_only": OptionInfo( - True, "Apply only to high-res fix pass. 
Disabling can yield a ~20-35% speedup on contemporary resolutions.", - gr.Checkbox - ), "token_merging_ratio_hr": OptionInfo( - 0.5, "Merging Ratio (high-res pass) - If 'Apply only to high-res' is enabled, this will always be the ratio used.", + 0, "Merging Ratio (high-res pass)", gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} ), - # More advanced/niche settings: - "token_merging_random": OptionInfo( - False, "Use random perturbations - Can improve outputs for certain samplers. For others, it may cause visual artifacting.", - gr.Checkbox - ), - "token_merging_merge_attention": OptionInfo( - True, "Merge attention", - gr.Checkbox - ), - "token_merging_merge_cross_attention": OptionInfo( - False, "Merge cross attention", - gr.Checkbox - ), - "token_merging_merge_mlp": OptionInfo( - False, "Merge mlp", - gr.Checkbox - ), - "token_merging_maximum_down_sampling": OptionInfo(1, "Maximum down sampling", gr.Radio, lambda: {"choices": [1, 2, 4, 8]}), - "token_merging_stride_x": OptionInfo( - 2, "Stride - X", - gr.Slider, {"minimum": 2, "maximum": 8, "step": 2} - ), - "token_merging_stride_y": OptionInfo( - 2, "Stride - Y", - gr.Slider, {"minimum": 2, "maximum": 8, "step": 2} + "token_merging_ratio": OptionInfo( + 0, "Merging Ratio", + gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} ) })) -- cgit v1.2.1 From 917faa5325371e51d68f7d6f7b15ea4466bd5adf Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Sat, 13 May 2023 10:26:09 -0500 Subject: move to stable-diffusion tab --- modules/shared.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 0d96c14c..e49e9b74 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -350,6 +350,8 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source. Changes seeds drastically. 
Use CPU to produce the same picture across different vidocard vendors.", gr.Radio, {"choices": ["GPU", "CPU"]}), + "token_merging_ratio_hr": OptionInfo(0, "Merging Ratio (high-res pass)", gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1}), + "token_merging_ratio": OptionInfo(0, "Merging Ratio", gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1}) })) options_templates.update(options_section(('compatibility', "Compatibility"), { @@ -458,16 +460,6 @@ options_templates.update(options_section((None, "Hidden options"), { "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), })) -options_templates.update(options_section(('token_merging', 'Token Merging'), { - "token_merging_ratio_hr": OptionInfo( - 0, "Merging Ratio (high-res pass)", - gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} - ), - "token_merging_ratio": OptionInfo( - 0, "Merging Ratio", - gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1} - ) -})) options_templates.update() -- cgit v1.2.1 From c2fdb44880e07f43aee2f7edc1dc36a9516501e8 Mon Sep 17 00:00:00 2001 From: papuSpartan <30642826+papuSpartan@users.noreply.github.com> Date: Sat, 13 May 2023 11:11:02 -0500 Subject: fix for img2img --- modules/processing.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 32ff61e9..94fe2625 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -34,7 +34,7 @@ import tomesd # add a logger for the processing module logger = logging.getLogger(__name__) # manually set output level here since there is no option to do so yet through launch options -logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') +# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') # some of those options should not be changed at all because they would break the model, so I removed them from options. 
@@ -478,6 +478,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter index = position_in_batch + iteration * p.batch_size clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) + enable_hr = getattr(p, 'enable_hr', False) generation_params = { "Steps": p.steps, @@ -497,7 +498,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio, - "Token merging ratio hr": None if not p.enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, + "Token merging ratio hr": None if not enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, -- cgit v1.2.1 From 1f57b948b78df872c5a8a1c6e6c7e3c35e06f969 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Sat, 13 May 2023 19:14:10 +0300 Subject: Move localization to its own script block and load it first --- modules/localization.py | 4 ++-- modules/ui.py | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/localization.py b/modules/localization.py index f6a6f2fb..ee9c65e7 100644 --- a/modules/localization.py +++ b/modules/localization.py @@ -23,7 +23,7 @@ def list_localizations(dirname): localizations[fn] = file.path -def localization_js(current_localization_name): +def localization_js(current_localization_name: str) -> str: fn = localizations.get(current_localization_name, None) data = {} if fn is not None: @@ -34,4 +34,4 @@ def localization_js(current_localization_name): print(f"Error loading localization from {fn}:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) - return f"var localization = {json.dumps(data)}\n" + return f"window.localization = {json.dumps(data)}" diff --git a/modules/ui.py b/modules/ui.py index ff82fff6..ff25c4ce 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1771,12 +1771,11 @@ def webpath(fn): def javascript_html(): - script_js = os.path.join(script_path, "script.js") - head = f'\n' + # Ensure localization is in `window` before scripts + head = f'\n' - inline = f"{localization.localization_js(shared.opts.localization)};" - if cmd_opts.theme is not None: - inline += f"set_theme('{cmd_opts.theme}');" + script_js = os.path.join(script_path, "script.js") + head += f'\n' for script in modules.scripts.list_scripts("javascript", ".js"): head += f'\n' @@ -1784,7 +1783,8 @@ def javascript_html(): for script in modules.scripts.list_scripts("javascript", ".mjs"): head += f'\n' - head += f'\n' + if cmd_opts.theme: + head += f'\n' return head -- cgit v1.2.1 From 7e3539df6f4e3979e080ed5d76faa3649c10f76f Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 13 May 2023 20:21:11 +0300 Subject: fix upscalers disappearing after the user reloads UI --- modules/modelloader.py | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) (limited to 'modules') diff --git a/modules/modelloader.py b/modules/modelloader.py index cb85ac4f..a70aa0e3 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -117,20 +117,6 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None): pass -builtin_upscaler_classes = 
[] -forbidden_upscaler_classes = set() - - -def list_builtin_upscalers(): - builtin_upscaler_classes.clear() - builtin_upscaler_classes.extend(Upscaler.__subclasses__()) - -def forbid_loaded_nonbuiltin_upscalers(): - for cls in Upscaler.__subclasses__(): - if cls not in builtin_upscaler_classes: - forbidden_upscaler_classes.add(cls) - - def load_upscalers(): # We can only do this 'magic' method to dynamically load upscalers if they are referenced, # so we'll try to import any _model.py files before looking in __subclasses__ @@ -146,10 +132,17 @@ def load_upscalers(): datas = [] commandline_options = vars(shared.cmd_opts) - for cls in Upscaler.__subclasses__(): - if cls in forbidden_upscaler_classes: - continue + # some of upscaler classes will not go away after reloading their modules, and we'll end + # up with two copies of those classes. The newest copy will always be the last in the list, + # so we go from end to beginning and ignore duplicates + used_classes = {} + for cls in reversed(Upscaler.__subclasses__()): + classname = str(cls) + if classname not in used_classes: + used_classes[classname] = cls + + for cls in reversed(used_classes.values()): name = cls.__name__ cmd_name = f"{name.lower().replace('upscaler', '')}_models_path" scaler = cls(commandline_options.get(cmd_name, None)) -- cgit v1.2.1 From 063848798c4d4df6d3e108f4cc00c35ca38f5ebd Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 13 May 2023 19:45:18 +0300 Subject: Merge pull request #10339 from catboxanon/bf16 Allow bf16 in safe unpickler --- modules/safe.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/safe.py b/modules/safe.py index e6c2f2c0..e1a67f73 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -40,7 +40,7 @@ class RestrictedUnpickler(pickle.Unpickler): return getattr(collections, name) if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter', '_rebuild_device_tensor_from_numpy']: return getattr(torch._utils, name) - if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage', 'float32']: + if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage', 'float32', 'BFloat16Storage']: return getattr(torch, name) if module == 'torch.nn.modules.container' and name in ['ParameterDict']: return getattr(torch.nn.modules.container, name) -- cgit v1.2.1 From 12c78138dd56eef029ce74e371c8e646cb07f6fb Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 13 May 2023 19:43:15 +0300 Subject: Merge pull request #10324 from catboxanon/offline Allow web UI to be ran fully offline --- modules/shared.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 4631965b..b3508883 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -667,14 +667,19 @@ def reload_gradio_theme(theme_name=None): if not theme_name: theme_name = opts.gradio_theme + default_theme_args = dict( + font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'], + font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'], + ) + if theme_name == "Default": - gradio_theme = gr.themes.Default() + gradio_theme = gr.themes.Default(**default_theme_args) else: try: gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) except Exception as e: errors.display(e, "changing gradio theme") - gradio_theme = 
gr.themes.Default() + gradio_theme = gr.themes.Default(**default_theme_args) -- cgit v1.2.1 From 86ff43b930077aa41439c570fe41ad5de910455d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 13 May 2023 19:44:55 +0300 Subject: Merge pull request #10335 from akx/l10n-dis-take-2 Localization fixes --- modules/localization.py | 4 ++-- modules/ui.py | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'modules') diff --git a/modules/localization.py b/modules/localization.py index f6a6f2fb..ee9c65e7 100644 --- a/modules/localization.py +++ b/modules/localization.py @@ -23,7 +23,7 @@ def list_localizations(dirname): localizations[fn] = file.path -def localization_js(current_localization_name): +def localization_js(current_localization_name: str) -> str: fn = localizations.get(current_localization_name, None) data = {} if fn is not None: @@ -34,4 +34,4 @@ def localization_js(current_localization_name): print(f"Error loading localization from {fn}:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) - return f"var localization = {json.dumps(data)}\n" + return f"window.localization = {json.dumps(data)}" diff --git a/modules/ui.py b/modules/ui.py index d02f6e82..f07bcc41 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1863,12 +1863,11 @@ def webpath(fn): def javascript_html(): - script_js = os.path.join(script_path, "script.js") - head = f'\n' + # Ensure localization is in `window` before scripts + head = f'\n' - inline = f"{localization.localization_js(shared.opts.localization)};" - if cmd_opts.theme is not None: - inline += f"set_theme('{cmd_opts.theme}');" + script_js = os.path.join(script_path, "script.js") + head += f'\n' for script in modules.scripts.list_scripts("javascript", ".js"): head += f'\n' @@ -1876,7 +1875,8 @@ def javascript_html(): for script in modules.scripts.list_scripts("javascript", ".mjs"): head += f'\n' - head += f'\n' + if cmd_opts.theme: + head += f'\n' return head -- cgit v1.2.1 From 3078001439d25b66ef5627c9e3d431aa23bbed73 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Sun, 14 May 2023 01:49:41 +0000 Subject: Add/modify CFG callbacks Required by self-attn guidance extension https://github.com/ashen-sensored/sd_webui_SAG --- modules/script_callbacks.py | 35 +++++++++++++++++++++++++++++++++++ modules/sd_samplers_kdiffusion.py | 8 +++++++- 2 files changed, 42 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 7d9dd736..e83c6ecf 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -53,6 +53,21 @@ class CFGDenoiserParams: class CFGDenoisedParams: + def __init__(self, x, sampling_step, total_sampling_steps, inner_model): + self.x = x + """Latent image representation in the process of being denoised""" + + self.sampling_step = sampling_step + """Current Sampling step number""" + + self.total_sampling_steps = total_sampling_steps + """Total number of sampling steps planned""" + + self.inner_model = inner_model + """Inner model reference that is being used for denoising""" + + +class AfterCFGCallbackParams: def __init__(self, x, sampling_step, total_sampling_steps): self.x = x """Latent image representation in the process of being denoised""" @@ -63,6 +78,9 @@ class CFGDenoisedParams: self.total_sampling_steps = total_sampling_steps """Total number of sampling steps planned""" + self.output_altered = False + """A flag for CFGDenoiser that indicates whether the 
output has been altered by the callback""" + class UiTrainTabParams: def __init__(self, txt2img_preview_params): @@ -87,6 +105,7 @@ callback_map = dict( callbacks_image_saved=[], callbacks_cfg_denoiser=[], callbacks_cfg_denoised=[], + callbacks_cfg_after_cfg=[], callbacks_before_component=[], callbacks_after_component=[], callbacks_image_grid=[], @@ -186,6 +205,14 @@ def cfg_denoised_callback(params: CFGDenoisedParams): report_exception(c, 'cfg_denoised_callback') +def cfg_after_cfg_callback(params: AfterCFGCallbackParams): + for c in callback_map['callbacks_cfg_after_cfg']: + try: + c.callback(params) + except Exception: + report_exception(c, 'cfg_after_cfg_callback') + + def before_component_callback(component, **kwargs): for c in callback_map['callbacks_before_component']: try: @@ -332,6 +359,14 @@ def on_cfg_denoised(callback): add_callback(callback_map['callbacks_cfg_denoised'], callback) +def on_cfg_after_cfg(callback): + """register a function to be called in the kdiffussion cfg_denoiser method after cfg calculations has completed. + The callback is called with one argument: + - params: CFGDenoisedParams - parameters to be passed to the inner model and sampling state details. + """ + add_callback(callback_map['callbacks_cfg_after_cfg'], callback) + + def on_before_component(callback): """register a function to be called before a component is created. The callback is called with arguments: diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index e9e41818..55f0d3a3 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -8,6 +8,7 @@ from modules.shared import opts, state import modules.shared as shared from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback +from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback samplers_k_diffusion = [ ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}), @@ -160,7 +161,7 @@ class CFGDenoiser(torch.nn.Module): fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes]) x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be - denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) + denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model) cfg_denoised_callback(denoised_params) devices.test_for_nans(x_out, "unet") @@ -180,6 +181,11 @@ class CFGDenoiser(torch.nn.Module): if self.mask is not None: denoised = self.init_latent * self.mask + self.nmask * denoised + after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) + cfg_after_cfg_callback(after_cfg_callback_params) + if after_cfg_callback_params.output_altered: + denoised = after_cfg_callback_params.x + self.step += 1 return denoised -- cgit v1.2.1 From 8abfc95013d247c8a863d048574bc1f9d1eb0443 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Sun, 14 May 2023 12:56:34 +0800 Subject: Update script_callbacks.py --- modules/script_callbacks.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index e83c6ecf..57dfd457 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -64,7 +64,7 @@ class 
CFGDenoisedParams: """Total number of sampling steps planned""" self.inner_model = inner_model - """Inner model reference that is being used for denoising""" + """Inner model reference used for denoising""" class AfterCFGCallbackParams: @@ -79,7 +79,7 @@ class AfterCFGCallbackParams: """Total number of sampling steps planned""" self.output_altered = False - """A flag for CFGDenoiser that indicates whether the output has been altered by the callback""" + """A flag for CFGDenoiser indicating whether the output has been altered by the callback""" class UiTrainTabParams: @@ -360,9 +360,9 @@ def on_cfg_denoised(callback): def on_cfg_after_cfg(callback): - """register a function to be called in the kdiffussion cfg_denoiser method after cfg calculations has completed. + """register a function to be called in the kdiffussion cfg_denoiser method after cfg calculations are completed. The callback is called with one argument: - - params: CFGDenoisedParams - parameters to be passed to the inner model and sampling state details. + - params: AfterCFGCallbackParams - parameters to be passed to the script for post-processing after cfg calculation. """ add_callback(callback_map['callbacks_cfg_after_cfg'], callback) -- cgit v1.2.1 From 005849331e82cded96f6f3e5ff828037c672c38d Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 14 May 2023 08:15:22 +0300 Subject: remove output_altered flag from AfterCFGCallbackParams --- modules/script_callbacks.py | 3 --- modules/sd_samplers_kdiffusion.py | 3 +-- 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 57dfd457..3c21a362 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -78,9 +78,6 @@ class AfterCFGCallbackParams: self.total_sampling_steps = total_sampling_steps """Total number of sampling steps planned""" - self.output_altered = False - """A flag for CFGDenoiser indicating whether the output has been altered by the callback""" - class UiTrainTabParams: def __init__(self, txt2img_preview_params): diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 55f0d3a3..61f23ad7 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -183,8 +183,7 @@ class CFGDenoiser(torch.nn.Module): after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps) cfg_after_cfg_callback(after_cfg_callback_params) - if after_cfg_callback_params.output_altered: - denoised = after_cfg_callback_params.x + denoised = after_cfg_callback_params.x self.step += 1 return denoised -- cgit v1.2.1 From 2cfaffb239bb2b99aab06352f8c101e48e48dec9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 14 May 2023 08:30:37 +0300 Subject: updates for #9256 --- modules/generation_parameters_copypaste.py | 2 +- modules/shared.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index a0a98bbc..f1a2204c 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -311,7 +311,7 @@ infotext_to_setting_name_mapping = [ ('Token merging ratio', 'token_merging_ratio'), ('Token merging ratio hr', 'token_merging_ratio_hr'), ('RNG', 'randn_source'), - ('NGMS', 's_min_uncond') + ('NGMS', 's_min_uncond'), ] diff --git a/modules/shared.py b/modules/shared.py index 
a5e8d0bd..7ec9967e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -350,8 +350,8 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source. Changes seeds drastically. Use CPU to produce the same picture across different vidocard vendors.", gr.Radio, {"choices": ["GPU", "CPU"]}), - "token_merging_ratio_hr": OptionInfo(0, "Merging Ratio (high-res pass)", gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1}), - "token_merging_ratio": OptionInfo(0, "Merging Ratio", gr.Slider, {"minimum": 0, "maximum": 0.9, "step": 0.1}) + "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}), + "token_merging_ratio_hr": OptionInfo(0.0, "Togen merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}), })) options_templates.update(options_section(('compatibility', "Compatibility"), { -- cgit v1.2.1 From e14b586d0494d6c5cc3cbc45b5fa00c03d052443 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Sun, 14 May 2023 12:42:44 +0800 Subject: Add Tiny AE live preview --- modules/sd_samplers_common.py | 21 +++++++----- modules/sd_vae_taesd.py | 76 +++++++++++++++++++++++++++++++++++++++++++ modules/shared.py | 2 +- 3 files changed, 90 insertions(+), 9 deletions(-) create mode 100644 modules/sd_vae_taesd.py (limited to 'modules') diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index bc074238..d3dc130c 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -2,7 +2,7 @@ from collections import namedtuple import numpy as np import torch from PIL import Image -from modules import devices, processing, images, sd_vae_approx +from modules import devices, processing, images, sd_vae_approx, sd_vae_taesd from modules.shared import opts, state import modules.shared as shared @@ -22,21 +22,26 @@ def setup_img2img_steps(p, steps=None): return steps, t_enc -approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2} +approximation_indexes = {"Full": 0, "Tiny AE": 1, "Approx NN": 2, "Approx cheap": 3} def single_sample_to_image(sample, approximation=None): if approximation is None: approximation = approximation_indexes.get(opts.show_progress_type, 0) - if approximation == 2: - x_sample = sd_vae_approx.cheap_approximation(sample) - elif approximation == 1: - x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() + if approximation == 1: + x_sample = sd_vae_taesd.decode()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() + x_sample = sd_vae_taesd.TAESD.unscale_latents(x_sample) + x_sample = torch.clamp((x_sample * 0.25) + 0.5, 0, 1) else: - x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] + if approximation == 3: + x_sample = sd_vae_approx.cheap_approximation(sample) + elif approximation == 2: + x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() + else: + x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] + x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0) - x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0) x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) return Image.fromarray(x_sample) diff --git a/modules/sd_vae_taesd.py b/modules/sd_vae_taesd.py new file mode 100644 index 00000000..ccc97959 --- /dev/null +++ b/modules/sd_vae_taesd.py @@ -0,0 +1,76 @@ +""" +Tiny AutoEncoder for Stable Diffusion +(DNN for encoding / decoding SD's latent space) + +https://github.com/madebyollin/taesd +""" +import os +import torch +import torch.nn as nn + +from modules import devices, paths_internal + +sd_vae_taesd = None + + +def conv(n_in, n_out, **kwargs): + return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs) + + +class Clamp(nn.Module): + @staticmethod + def forward(x): + return torch.tanh(x / 3) * 3 + + +class Block(nn.Module): + def __init__(self, n_in, n_out): + super().__init__() + self.conv = nn.Sequential(conv(n_in, n_out), nn.ReLU(), conv(n_out, n_out), nn.ReLU(), conv(n_out, n_out)) + self.skip = nn.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity() + self.fuse = nn.ReLU() + + def forward(self, x): + return self.fuse(self.conv(x) + self.skip(x)) + + +def decoder(): + return nn.Sequential( + Clamp(), conv(4, 64), nn.ReLU(), + Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False), + Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False), + Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False), + Block(64, 64), conv(64, 3), + ) + + +class TAESD(nn.Module): + latent_magnitude = 2 + latent_shift = 0.5 + + def __init__(self, decoder_path="taesd_decoder.pth"): + """Initialize pretrained TAESD on the given device from the given checkpoints.""" + super().__init__() + self.decoder = decoder() + self.decoder.load_state_dict( + torch.load(decoder_path, map_location='cpu' if devices.device.type != 'cuda' else None)) + + @staticmethod + def unscale_latents(x): + """[0, 1] -> raw latents""" + return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude) + + +def decode(): + global sd_vae_taesd + + if sd_vae_taesd is None: + model_path = os.path.join(paths_internal.models_path, "VAE-approx", "taesd_decoder.pth") + if os.path.exists(model_path): + sd_vae_taesd = TAESD(model_path) + sd_vae_taesd.eval() + sd_vae_taesd.to(devices.device, devices.dtype) + else: + raise FileNotFoundError('Tiny AE mdoel not found') + + return sd_vae_taesd.decoder diff --git a/modules/shared.py b/modules/shared.py index 4631965b..6760a900 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -425,7 +425,7 @@ options_templates.update(options_section(('ui', "Live previews"), { "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), "show_progress_every_n_steps": OptionInfo(10, "Show new live preview image every N sampling steps. 
Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}), - "show_progress_type": OptionInfo("Approx NN", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}), + "show_progress_type": OptionInfo("Tiny AE", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Tiny AE", "Approx NN", "Approx cheap"]}), "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), "live_preview_refresh_period": OptionInfo(1000, "Progressbar/preview update period, in milliseconds") })) -- cgit v1.2.1 From bd9b9d425a355e151b43047a5df5fcead2fcdc52 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Sun, 14 May 2023 13:19:11 +0800 Subject: Add live preview mode check --- modules/sd_samplers_common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index d3dc130c..b1e8a780 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -26,8 +26,8 @@ approximation_indexes = {"Full": 0, "Tiny AE": 1, "Approx NN": 2, "Approx cheap" def single_sample_to_image(sample, approximation=None): - if approximation is None: - approximation = approximation_indexes.get(opts.show_progress_type, 0) + if approximation is None or approximation not in approximation_indexes.keys(): + approximation = approximation_indexes.get(opts.show_progress_type, 1) if approximation == 1: x_sample = sd_vae_taesd.decode()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() -- cgit v1.2.1 From ce515b81c57a2028ea515bd8f6f7984ba0f08963 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 14 May 2023 10:02:51 +0300 Subject: set up a system to provide extra info for settings elements in python rather than js add a bit of spacing/styling to settings elements add link info for token merging --- modules/shared.py | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 7ec9967e..24fdcd59 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -199,8 +199,9 @@ interrogator = modules.interrogate.InterrogateModels("interrogate") face_restorers = [] + class OptionInfo: - def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None): + def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after=''): self.default = default self.label = label self.component = component @@ -209,6 +210,24 @@ class OptionInfo: self.section = section self.refresh = refresh + self.comment_before = comment_before + """HTML text that will be added after label in UI""" + + self.comment_after = comment_after + """HTML text that will be added before label in UI""" + + def link(self, label, url): + self.comment_before += f"[{label}]" + return self + + def js(self, label, js_func): + self.comment_before += f"[{label}]" + return self + + def info(self, info): + self.comment_after += f"({info})" + return self + def options_section(section_identifier, options_dict): for v in options_dict.values(): @@ -240,7 +259,7 @@ options_templates = {} options_templates.update(options_section(('saving-images', "Saving images/grids"), { "samples_save": 
OptionInfo(True, "Always save all generated images"), "samples_format": OptionInfo('png', 'File format for images'), - "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs), + "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs), "grid_save": OptionInfo(True, "Always save all generated image grids"), @@ -290,7 +309,7 @@ options_templates.update(options_section(('saving-to-dirs', "Saving to a directo "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"), "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"), "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), - "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs), + "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"), "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), })) @@ -350,7 +369,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source. Changes seeds drastically. 
Use CPU to produce the same picture across different vidocard vendors.", gr.Radio, {"choices": ["GPU", "CPU"]}), - "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}), + "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), "token_merging_ratio_hr": OptionInfo(0.0, "Togen merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}), })) @@ -404,7 +423,7 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}), + "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings"), "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), @@ -572,7 +591,9 @@ class Options: func() def dumpjson(self): - d = {k: self.data.get(k, self.data_labels.get(k).default) for k in self.data_labels.keys()} + d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()} + d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None} + d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None} return json.dumps(d) def add_option(self, key, info): -- cgit v1.2.1 From a423f23d289225a39d6f93a03bfda13eddbb42b7 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 14 May 2023 16:22:40 +0900 Subject: allow jpeg for extra network preview --- modules/ui_extra_networks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index e35d0bfe..0baccf56 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -30,7 +30,7 @@ def fetch_file(filename: str = ""): raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.") ext = os.path.splitext(filename)[1].lower() - if ext not in (".png", ".jpg", ".webp"): + if ext not in (".png", ".jpg", ".jpeg", ".webp"): raise ValueError(f"File cannot be fetched: {filename}. Only png and jpg and webp.") # would profit from returning 304 @@ -194,7 +194,7 @@ class ExtraNetworksPage: Find a preview PNG for a given path (without extension) and call link_preview on it. 
""" - preview_extensions = ["png", "jpg", "webp"] + preview_extensions = ["png", "jpg", "jpeg", "webp"] if shared.opts.samples_format not in preview_extensions: preview_extensions.append(shared.opts.samples_format) -- cgit v1.2.1 From a00e42556ffbc1b757fda5ba3f85a9e11c931441 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 14 May 2023 11:04:21 +0300 Subject: add a bunch of descriptions and reword a lot of settings (sorry, localizers) --- modules/shared.py | 94 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 49 insertions(+), 45 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 24fdcd59..a0577644 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -228,6 +228,12 @@ class OptionInfo: self.comment_after += f"({info})" return self + def needs_restart(self): + self.comment_after += " (requires restart)" + return self + + + def options_section(section_identifier, options_dict): for v in options_dict.values(): @@ -278,10 +284,10 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"), "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"), - "export_for_4chan": OptionInfo(True, "If the saved image file size is above the limit, or its either width or height are above the limit, save a downscaled copy as JPG"), + "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"), "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number), - "img_max_size_mp": OptionInfo(200, "Maximum image size, in megapixels", gr.Number), + "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"), "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"), "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), @@ -314,23 +320,21 @@ options_templates.update(options_section(('saving-to-dirs', "Saving to a directo })) options_templates.update(options_section(('upscaling', "Upscaling"), { - "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}), - "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}), - "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. 
(Requires restart)", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), + "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"), + "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"), + "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}), "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), - "SCUNET_tile": OptionInfo(256, "Tile size for SCUNET upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}), - "SCUNET_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SCUNET upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}), })) options_templates.update(options_section(('face-restoration', "Face restoration"), { "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), - "code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), + "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"), "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), })) options_templates.update(options_section(('system', "System"), { "show_warnings": OptionInfo(False, "Show warnings in console."), - "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. 
Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}), + "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"), "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), @@ -355,20 +359,20 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), - "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list), + "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"), "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), - "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."), + "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"), "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill image's transparent parts with this color.", ui_components.FormColorPicker, {}), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. 
Requires restart to apply."), - "enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"), + "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), - "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }), - "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), + "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), + "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP nrtwork; 1 ignores none, 2 ignores one layer"), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), - "randn_source": OptionInfo("GPU", "Random number generator source. Changes seeds drastically. Use CPU to produce the same picture across different vidocard vendors.", gr.Radio, {"choices": ["GPU", "CPU"]}), + "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different vidocard vendors"), "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), "token_merging_ratio_hr": OptionInfo(0.0, "Togen merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}), })) @@ -382,30 +386,32 @@ options_templates.update(options_section(('compatibility', "Compatibility"), { })) options_templates.update(options_section(('interrogate', "Interrogate Options"), { - "interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"), - "interrogate_return_ranks": OptionInfo(False, "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators)."), - "interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), - "interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), - "interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), - "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file (0 = No limit)"), + "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"), + "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"), + "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}), + 
"interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}), + "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}), + "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"), "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": modules.interrogate.category_types()}, refresh=modules.interrogate.category_types), - "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "Interrogate: deepbooru score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), - "deepbooru_sort_alpha": OptionInfo(True, "Interrogate: deepbooru sort alphabetically"), - "deepbooru_use_spaces": OptionInfo(False, "use spaces for tags in deepbooru"), - "deepbooru_escape": OptionInfo(True, "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)"), - "deepbooru_filter_tags": OptionInfo("", "filter out those tags from deepbooru output (separated by comma)"), + "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), + "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"), + "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"), + "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"), + "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"), })) options_templates.update(options_section(('extra_networks', "Extra Networks"), { "extra_networks_default_view": OptionInfo("cards", "Default view for Extra Networks", gr.Dropdown, {"choices": ["cards", "thumbs"]}), "extra_networks_default_multiplier": OptionInfo(1.0, "Multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"), - "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"), - "extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"), + "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), + "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), + "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks), })) options_templates.update(options_section(('ui', "User interface"), { + "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_restart(), + "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}).needs_restart(), "return_grid": OptionInfo(True, "Show grid in results for web"), "return_mask": OptionInfo(False, "For inpainting, include 
the greyscale mask in results for web"), "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"), @@ -418,17 +424,15 @@ options_templates.update(options_section(('ui', "User interface"), { "js_modal_lightbox_gamepad": OptionInfo(True, "Navigate image viewer with gamepad"), "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), - "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), - "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row"), + "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_restart(), + "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_restart(), "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), - "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings"), - "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}), + "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_restart(), + "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), - "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), - "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), - "gradio_theme": OptionInfo("Default", "Gradio theme (requires restart)", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}) + "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_restart(), })) options_templates.update(options_section(('infotext', "Infotext"), { @@ -443,26 +447,26 @@ options_templates.update(options_section(('ui', "Live previews"), { "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), "live_previews_format": OptionInfo("auto", "Live preview file format", gr.Radio, {"choices": ["auto", "jpeg", "png", "webp"]}), "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), - "show_progress_every_n_steps": OptionInfo(10, "Show new live preview image every N sampling steps. 
Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}), - "show_progress_type": OptionInfo("Approx NN", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}), + "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"), + "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}).info("Full = slow but pretty; Approx NN = fast but low quality; Approx cheap = super fast but terrible otherwise"), "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), - "live_preview_refresh_period": OptionInfo(1000, "Progressbar/preview update period, in milliseconds") + "live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"), })) options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}), - "eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + "hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in list_samplers()]}).needs_restart(), + "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), + "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_min_uncond': OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), - 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma"), + 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), + 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}), 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}), - 
'uni_pc_order': OptionInfo(3, "UniPC order (must be < sampling steps)", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}), + 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"), 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"), })) -- cgit v1.2.1 From a58ae0b7174d9903fa426def2eda842dbbfcb53c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 14 May 2023 11:15:15 +0300 Subject: remove auto live previews format option, fix slow PNG generation --- modules/progress.py | 19 +++++++++---------- modules/shared.py | 2 +- 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'modules') diff --git a/modules/progress.py b/modules/progress.py index c2e37834..269863c9 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -95,17 +95,16 @@ def progressapi(req: ProgressRequest): image = shared.state.current_image if image is not None: buffered = io.BytesIO() - format = opts.live_previews_format - save_kwargs = {} - if format == "auto": - if max(*image.size) > 256: - format = "jpeg" - else: - format = "png" - save_kwargs = {"optimize": True} - image.save(buffered, format=format, **save_kwargs) + + if opts.live_previews_image_format == "png": + # using optimize for large images takes an enormous amount of time + save_kwargs = {"optimize": max(*image.size) > 256} + else: + save_kwargs = {} + + image.save(buffered, format=opts.live_previews_image_format, **save_kwargs) base64_image = base64.b64encode(buffered.getvalue()).decode('ascii') - live_preview = f"data:image/{format};base64,{base64_image}" + live_preview = f"data:image/{opts.live_previews_image_format};base64,{base64_image}" id_live_preview = shared.state.id_live_preview else: live_preview = None diff --git a/modules/shared.py b/modules/shared.py index a0577644..07f18b1b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -445,7 +445,7 @@ options_templates.update(options_section(('infotext', "Infotext"), { options_templates.update(options_section(('ui', "Live previews"), { "show_progressbar": OptionInfo(True, "Show progressbar"), "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), - "live_previews_format": OptionInfo("auto", "Live preview file format", gr.Radio, {"choices": ["auto", "jpeg", "png", "webp"]}), + "live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}), "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), "show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"), "show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}).info("Full = slow but pretty; Approx NN = fast but low quality; Approx cheap = super fast but terrible otherwise"), -- cgit v1.2.1 From 1a43524018ea3e64b93be2abc2a49b6159515442 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 14 May 2023 13:27:50 +0300 Subject: fix model loading twice in some situations --- modules/sd_hijack.py | 3 +++ modules/sd_models.py | 3 +++ 2 files changed, 6 insertions(+) (limited to 'modules') diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 7e50f1ab..14e7f799 100644 --- a/modules/sd_hijack.py +++ 
b/modules/sd_hijack.py @@ -216,6 +216,9 @@ class StableDiffusionModelHijack: self.comments = [] def get_prompt_lengths(self, text): + if self.clip is None: + return "-", "-" + _, token_count = self.clip.process_texts([text]) return token_count, self.clip.get_target_prompt_token_count(token_count) diff --git a/modules/sd_models.py b/modules/sd_models.py index 4c9a0a1f..dddbc6e1 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -414,6 +414,9 @@ class SdModelData: def get_sd_model(self): if self.sd_model is None: with self.lock: + if self.sd_model is not None: + return self.sd_model + try: load_model() except Exception as e: -- cgit v1.2.1 From 742da3193290f5692901c4c614c98bec291163f2 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Mon, 15 May 2023 03:04:34 +0800 Subject: Minor changes --- modules/sd_vae_taesd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/sd_vae_taesd.py b/modules/sd_vae_taesd.py index ccc97959..927a7298 100644 --- a/modules/sd_vae_taesd.py +++ b/modules/sd_vae_taesd.py @@ -71,6 +71,6 @@ def decode(): sd_vae_taesd.eval() sd_vae_taesd.to(devices.device, devices.dtype) else: - raise FileNotFoundError('Tiny AE mdoel not found') + raise FileNotFoundError('Tiny AE model not found') return sd_vae_taesd.decoder -- cgit v1.2.1 From f517838c75014f981ae1c41f1bc776d74daf9a23 Mon Sep 17 00:00:00 2001 From: Keith <1868690+wk5ovc@users.noreply.github.com> Date: Mon, 15 May 2023 10:47:01 +0800 Subject: Fix extra networks save preview image geninfo --- modules/ui_extra_networks.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 8c3dea56..9e6e0531 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -334,9 +334,19 @@ def setup_ui(ui, gallery): assert is_allowed, f'writing to {filename} is not allowed' if geninfo: - pnginfo_data = PngImagePlugin.PngInfo() - pnginfo_data.add_text('parameters', geninfo) - image.save(filename, pnginfo=pnginfo_data) + ext = os.path.splitext(filename)[1].lower() + if ext == '.png': + pnginfo_data = PngImagePlugin.PngInfo() + pnginfo_data.add_text('parameters', geninfo) + image.save(filename, pnginfo=pnginfo_data) + elif ext in ('.jpg', '.jpeg', '.webp'): + exif_bytes = piexif.dump({ + 'Exif': {piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or '', + encoding='unicode')} + }) + image.save(filename, exif=exif_bytes, quality=shared.opts.jpeg_quality) + else: + image.save(filename) else: image.save(filename) -- cgit v1.2.1 From 0d3a80e2692fb72da9798367b882f45312202f2e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 15 May 2023 20:33:44 +0300 Subject: Show "Loading..." 
for extra networks when displaying for the first time --- modules/ui_extra_networks.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 0baccf56..752cf2b8 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -268,7 +268,7 @@ def create_ui(container, button, tabname): with gr.Tab(page.title, id=page_id): elem_id = f"{tabname}_{page_id}_cards_html" - page_elem = gr.HTML('', elem_id=elem_id) + page_elem = gr.HTML('Loading...', elem_id=elem_id) ui.pages.append(page_elem) page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + json.dumps(tabname) + '); return []}', inputs=[], outputs=[]) @@ -282,13 +282,24 @@ def create_ui(container, button, tabname): def toggle_visibility(is_visible): is_visible = not is_visible - if is_visible and not ui.pages_contents: + return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary")) + + def fill_tabs(is_empty): + """Creates HTML for extra networks' tabs when the extra networks button is clicked for the first time.""" + + if not ui.pages_contents: refresh() - return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary")), *ui.pages_contents + if is_empty: + return True, *ui.pages_contents + + return True, *[gr.update() for _ in ui.pages_contents] state_visible = gr.State(value=False) - button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button, *ui.pages]) + button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button], show_progress=False) + + state_empty = gr.State(value=True) + button.click(fn=fill_tabs, inputs=[state_empty], outputs=[state_empty, *ui.pages], show_progress=False) def refresh(): for pg in ui.stored_extra_pages: -- cgit v1.2.1 From 0d2a4b608c075daa3a4d1a1c9df01a763ae4793a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 15 May 2023 20:57:11 +0300 Subject: load extensions' git metadata in parallel to loading the main program to save a ton of time during startup --- modules/config_states.py | 2 ++ modules/extensions.py | 12 +++++++++++- modules/ui_extensions.py | 15 +++++++++++++-- 3 files changed, 26 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/config_states.py b/modules/config_states.py index 75da862a..db65bcdb 100644 --- a/modules/config_states.py +++ b/modules/config_states.py @@ -83,6 +83,8 @@ def get_extension_config(): ext_config = {} for ext in extensions.extensions: + ext.read_info_from_repo() + entry = { "name": ext.name, "path": ext.path, diff --git a/modules/extensions.py b/modules/extensions.py index bc2c0450..1053253e 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -1,5 +1,6 @@ import os import sys +import threading import traceback import time @@ -24,6 +25,8 @@ def active(): class Extension: + lock = threading.Lock() + def __init__(self, name, path, enabled=True, is_builtin=False): self.name = name self.path = path @@ -42,8 +45,13 @@ class Extension: if self.is_builtin or self.have_info_from_repo: return - self.have_info_from_repo = True + with self.lock: + if self.have_info_from_repo: + return + self.do_read_info_from_repo() + + def do_read_info_from_repo(self): repo = None try: if os.path.exists(os.path.join(self.path, ".git")): @@ -70,6 +78,8 @@ class Extension: print(f"Failed reading 
extension data from Git repository ({self.name}): {ex}", file=sys.stderr) self.remote = None + self.have_info_from_repo = True + def list_files(self, subdir, extension): from modules import scripts diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index af497733..aaa7e571 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -1,6 +1,7 @@ import json import os.path import sys +import threading import time from datetime import datetime import traceback @@ -484,11 +485,18 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=" return code, list(tags) +def preload_extensions_git_metadata(): + for extension in extensions.extensions: + extension.read_info_from_repo() + + def create_ui(): import modules.ui config_states.list_config_states() + threading.Thread(target=preload_extensions_git_metadata).start() + with gr.Blocks(analytics_enabled=False) as ui: with gr.Tabs(elem_id="tabs_extensions"): with gr.TabItem("Installed", id="installed"): @@ -508,7 +516,8 @@ def create_ui(): """ info = gr.HTML(html) - extensions_table = gr.HTML(lambda: extension_table()) + extensions_table = gr.HTML('Loading...') + ui.load(fn=extension_table, inputs=[], outputs=[extensions_table]) apply.click( fn=apply_and_restart, @@ -595,7 +604,8 @@ def create_ui(): config_save_button = gr.Button(value="Save Current Config") config_states_info = gr.HTML("") - config_states_table = gr.HTML(lambda: update_config_states_table("Current")) + config_states_table = gr.HTML("Loading...") + ui.load(fn=update_config_states_table, inputs=[config_states_list], outputs=[config_states_table]) config_save_button.click(fn=save_config_state, inputs=[config_save_name], outputs=[config_states_list, config_states_info]) @@ -608,4 +618,5 @@ def create_ui(): outputs=[config_states_table], ) + return ui -- cgit v1.2.1 From a47abe1b7b667374e9df1932172230132d3fe8db Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 15 May 2023 21:22:35 +0300 Subject: update extensions table: show branch, show date in separate column, and show version from tags if available --- modules/extensions.py | 6 ++---- modules/ui_extensions.py | 7 ++++++- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/extensions.py b/modules/extensions.py index 1053253e..f16f059e 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -66,13 +66,11 @@ class Extension: try: self.status = 'unknown' self.remote = next(repo.remote().urls, None) - head = repo.head.commit self.commit_date = repo.head.commit.committed_date - ts = time.asctime(time.gmtime(self.commit_date)) if repo.active_branch: self.branch = repo.active_branch.name - self.commit_hash = head.hexsha - self.version = f'{self.commit_hash[:8]} ({ts})' + self.commit_hash = repo.head.commit.hexsha + self.version = repo.git.describe("--always", "--tags") # compared to `self.commit_hash[:8]` this takes about 30% more time total but since we run it in parallel we don't care except Exception as ex: print(f"Failed reading extension data from Git repository ({self.name}): {ex}", file=sys.stderr) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index aaa7e571..6ad9a97c 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -141,7 +141,9 @@ def extension_table(): - + + + @@ -149,6 +151,7 @@ def extension_table(): """ for ext in extensions.extensions: + ext: extensions.Extension ext.read_info_from_repo() remote = f"""{html.escape("built-in" if ext.is_builtin else ext.remote or 
'')}""" @@ -170,7 +173,9 @@ def extension_table(): + + {ext_status} """ -- cgit v1.2.1 From 3d76eabbca3adb711787d1802d6b61c0971b4bc0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 16 May 2023 07:59:43 +0300 Subject: add visual progress for extension installation from URL --- modules/extensions.py | 1 - modules/ui_extensions.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'modules') diff --git a/modules/extensions.py b/modules/extensions.py index f16f059e..359a7aa5 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -3,7 +3,6 @@ import sys import threading import traceback -import time import git from modules import shared diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 6ad9a97c..d7a0f685 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -593,9 +593,9 @@ def create_ui(): install_result = gr.HTML(elem_id="extension_install_result") install_button.click( - fn=modules.ui.wrap_gradio_call(install_extension_from_url, extra_outputs=[gr.update()]), + fn=modules.ui.wrap_gradio_call(lambda *args: [gr.update(), *install_extension_from_url(*args)], extra_outputs=[gr.update(), gr.update()]), inputs=[install_dirname, install_url, install_branch], - outputs=[extensions_table, install_result], + outputs=[install_url, extensions_table, install_result], ) with gr.TabItem("Backup/Restore"): -- cgit v1.2.1 From cdac5ace1456ba779d5a0171ff8757f31955bfee Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 16 May 2023 11:54:02 +0300 Subject: suppress ENSD infotext for samplers that don't use it --- modules/processing.py | 11 +++++++---- modules/sd_samplers.py | 8 +++++++- modules/sd_samplers_common.py | 21 ++++++++++++++++++++- modules/sd_samplers_compvis.py | 8 ++++++-- modules/sd_samplers_kdiffusion.py | 16 ++++++++-------- 5 files changed, 48 insertions(+), 16 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 94fe2625..15806f78 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -13,7 +13,7 @@ from skimage import exposure from typing import Any, Dict, List import modules.sd_hijack -from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts +from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common from modules.sd_hijack import model_hijack from modules.shared import opts, cmd_opts, state import modules.shared as shared @@ -480,6 +480,10 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) enable_hr = getattr(p, 'enable_hr', False) + uses_ensd = opts.eta_noise_seed_delta != 0 + if uses_ensd: + uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p) + generation_params = { "Steps": p.steps, "Sampler": p.sampler_name, @@ -496,17 +500,16 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Denoising strength": getattr(p, 'denoising_strength', None), "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, - "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "ENSD": opts.eta_noise_seed_delta if uses_ensd else 
None, "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio, "Token merging ratio hr": None if not enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, + **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, } - generation_params.update(p.extra_generation_params) - generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None]) negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else "" diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 4f1bf21d..f22aad8f 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -14,12 +14,18 @@ samplers_for_img2img = [] samplers_map = {} -def create_sampler(name, model): +def find_sampler_config(name): if name is not None: config = all_samplers_map.get(name, None) else: config = all_samplers[0] + return config + + +def create_sampler(name, model): + config = find_sampler_config(name) + assert config is not None, f'bad sampler name: {name}' sampler = config.constructor(model) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index bc074238..92880caf 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -2,7 +2,7 @@ from collections import namedtuple import numpy as np import torch from PIL import Image -from modules import devices, processing, images, sd_vae_approx +from modules import devices, processing, images, sd_vae_approx, sd_samplers from modules.shared import opts, state import modules.shared as shared @@ -58,6 +58,25 @@ def store_latent(decoded): shared.state.assign_current_image(sample_to_image(decoded)) +def is_sampler_using_eta_noise_seed_delta(p): + """returns whether sampler from config will use eta noise seed delta for image creation""" + + sampler_config = sd_samplers.find_sampler_config(p.sampler_name) + + eta = p.eta + + if eta is None and p.sampler is not None: + eta = p.sampler.eta + + if eta is None and sampler_config is not None: + eta = 0 if sampler_config.options.get("default_eta_is_0", False) else 1.0 + + if eta == 0: + return False + + return sampler_config.options.get("uses_ensd", False) + + class InterruptedException(BaseException): pass diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py index b1ee3be7..bdae8b40 100644 --- a/modules/sd_samplers_compvis.py +++ b/modules/sd_samplers_compvis.py @@ -11,7 +11,7 @@ import modules.models.diffusion.uni_pc samplers_data_compvis = [ - sd_samplers_common.SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}), + sd_samplers_common.SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {"default_eta_is_0": True, "uses_ensd": True}), sd_samplers_common.SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}), sd_samplers_common.SamplerData('UniPC', lambda model: VanillaStableDiffusionSampler(modules.models.diffusion.uni_pc.UniPCSampler, model), [], {}), ] @@ -134,7 +134,11 @@ class VanillaStableDiffusionSampler: self.update_step(x) def initialize(self, p): - self.eta = 
p.eta if p.eta is not None else shared.opts.eta_ddim + if self.is_ddim: + self.eta = p.eta if p.eta is not None else shared.opts.eta_ddim + else: + self.eta = 0.0 + if self.eta != 0.0: p.extra_generation_params["Eta DDIM"] = self.eta diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 61f23ad7..5455561a 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -11,21 +11,21 @@ from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}), + ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), ('Euler', 'sample_euler', ['k_euler'], {}), ('LMS', 'sample_lms', ['k_lms'], {}), ('Heun', 'sample_heun', ['k_heun'], {}), ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}), + ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), + ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True}), ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}), + ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), + ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}), + ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True}), + ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True}), + ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True}), ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}), ] -- cgit v1.2.1 From a61cbef02c7652f96d333b28e01f5230e225224e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 16 May 2023 12:36:15 +0300 Subject: add second_order field to sampler config --- modules/processing.py | 8 ++------ modules/sd_samplers_kdiffusion.py | 14 +++++++------- 2 files changed, 9 insertions(+), 13 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 15806f78..678c4468 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -681,12 +681,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: processed = Processed(p, [], p.seed, "") file.write(processed.infotext(p, 0)) - step_multiplier = 1 - if not shared.opts.dont_fix_second_order_samplers_schedule: - try: - step_multiplier = 2 if 
sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1 - except Exception: - pass + sampler_config = sd_samplers.find_sampler_config(p.sampler_name) + step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1 uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 5455561a..552c6c64 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -14,20 +14,20 @@ samplers_k_diffusion = [ ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {"uses_ensd": True}), ('Euler', 'sample_euler', ['k_euler'], {}), ('LMS', 'sample_lms', ['k_lms'], {}), - ('Heun', 'sample_heun', ['k_heun'], {}), + ('Heun', 'sample_heun', ['k_heun'], {"second_order": True}), ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True, "uses_ensd": True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True}), + ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}), ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}), + ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True}), ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}), ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}), ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True}), + ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), + ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True, "uses_ensd": True, "second_order": True}), + ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}), ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}), + ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True}), ] samplers_data_k_diffusion = [ -- cgit v1.2.1 From 6302978ff8e51ad0917c62806ca127b514088a70 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 16 May 2023 15:14:44 +0300 Subject: restore nqsp in footer that was lost during linting --- modules/ui.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/ui.py b/modules/ui.py index ff25c4ce..8e51e782 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1841,15 
+1841,15 @@ def versions_html(): return f""" version: {tag} - • + •  python: {python_version} - • + •  torch: {getattr(torch, '__long_version__',torch.__version__)} - • + •  xformers: {xformers_version} - • + •  gradio: {gr.__version__} - • + •  checkpoint: N/A """ -- cgit v1.2.1 From ce38ee8f26d0b84888c72b58cdd9682ac3fd6151 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 16 May 2023 15:41:49 +0300 Subject: add info link for Negative Guidance minimum sigma --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 07f18b1b..3abf71c0 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -458,8 +458,8 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), + 's_min_uncond': OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - 's_min_uncond': OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), -- cgit v1.2.1 From 54f657ffbc3c2e297d1d81c0e2026a68ccfbd602 Mon Sep 17 00:00:00 2001 From: dennissheng Date: Wed, 17 May 2023 10:47:02 +0800 Subject: not clear checkpoints cache when config changes --- modules/sd_models.py | 1 - 1 file changed, 1 deletion(-) (limited to 'modules') diff --git a/modules/sd_models.py b/modules/sd_models.py index 36f643e1..e37fcb8f 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -538,7 +538,6 @@ def reload_model_weights(sd_model=None, info=None): if sd_model is None or checkpoint_config != sd_model.used_config: del sd_model - checkpoints_loaded.clear() load_model(checkpoint_info, already_loaded_state_dict=state_dict) return model_data.sd_model -- cgit v1.2.1 From 56a2672831751480f94a018f861f0143a8234ae8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 09:24:01 +0300 Subject: return live preview defaults to how they were only download TAESD model when it's needed return calculations in single_sample_to_image to just if/elif/elif blocks keep taesd model in its own directory --- modules/sd_samplers_common.py | 29 +++++++++++++++-------------- modules/sd_vae_taesd.py | 18 +++++++++++++++--- modules/shared.py | 2 +- 3 files changed, 31 insertions(+), 18 deletions(-) (limited to 'modules') 
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index b1e8a780..20a9af20 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -22,28 +22,29 @@ def setup_img2img_steps(p, steps=None): return steps, t_enc -approximation_indexes = {"Full": 0, "Tiny AE": 1, "Approx NN": 2, "Approx cheap": 3} +approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3} def single_sample_to_image(sample, approximation=None): - if approximation is None or approximation not in approximation_indexes.keys(): - approximation = approximation_indexes.get(opts.show_progress_type, 1) - if approximation == 1: - x_sample = sd_vae_taesd.decode()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() - x_sample = sd_vae_taesd.TAESD.unscale_latents(x_sample) - x_sample = torch.clamp((x_sample * 0.25) + 0.5, 0, 1) + if approximation is None: + approximation = approximation_indexes.get(opts.show_progress_type, 0) + + if approximation == 2: + x_sample = sd_vae_approx.cheap_approximation(sample) + elif approximation == 1: + x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() + elif approximation == 3: + x_sample = sd_vae_taesd.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() + x_sample = sd_vae_taesd.TAESD.unscale_latents(x_sample) # returns value in [-2, 2] + x_sample = x_sample * 0.5 else: - if approximation == 3: - x_sample = sd_vae_approx.cheap_approximation(sample) - elif approximation == 2: - x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() - else: - x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] - x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0) + x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] + x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0) x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) + return Image.fromarray(x_sample) diff --git a/modules/sd_vae_taesd.py b/modules/sd_vae_taesd.py index 927a7298..d23812ef 100644 --- a/modules/sd_vae_taesd.py +++ b/modules/sd_vae_taesd.py @@ -61,16 +61,28 @@ class TAESD(nn.Module): return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude) -def decode(): +def download_model(model_path): + model_url = 'https://github.com/madebyollin/taesd/raw/main/taesd_decoder.pth' + + if not os.path.exists(model_path): + os.makedirs(os.path.dirname(model_path), exist_ok=True) + + print(f'Downloading TAESD decoder to: {model_path}') + torch.hub.download_url_to_file(model_url, model_path) + + +def model(): global sd_vae_taesd if sd_vae_taesd is None: - model_path = os.path.join(paths_internal.models_path, "VAE-approx", "taesd_decoder.pth") + model_path = os.path.join(paths_internal.models_path, "VAE-taesd", "taesd_decoder.pth") + download_model(model_path) + if os.path.exists(model_path): sd_vae_taesd = TAESD(model_path) sd_vae_taesd.eval() sd_vae_taesd.to(devices.device, devices.dtype) else: - raise FileNotFoundError('Tiny AE model not found') + raise FileNotFoundError('TAESD model not found') return sd_vae_taesd.decoder diff --git a/modules/shared.py b/modules/shared.py index 6760a900..96036d38 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -425,7 +425,7 @@ options_templates.update(options_section(('ui', "Live previews"), { "live_previews_enable": OptionInfo(True, "Show live previews of the created image"), "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"), "show_progress_every_n_steps": OptionInfo(10, "Show new live preview image every N sampling steps. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}), - "show_progress_type": OptionInfo("Tiny AE", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Tiny AE", "Approx NN", "Approx cheap"]}), + "show_progress_type": OptionInfo("Approx NN", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}), "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}), "live_preview_refresh_period": OptionInfo(1000, "Progressbar/preview update period, in milliseconds") })) -- cgit v1.2.1 From 85b4f89926f7c3aaa7846dcbb47df3fd3b483b6b Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 11 May 2023 23:46:45 +0300 Subject: Replace state.need_restart with state.server_command + replace poll loop with signal --- modules/shared.py | 42 +++++++++++++++++++++++++++++++++++++++++- modules/ui.py | 6 +----- modules/ui_extensions.py | 7 ++----- 3 files changed, 44 insertions(+), 11 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 3abf71c0..648a2a19 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -2,6 +2,7 @@ import datetime import json import os import sys +import threading import time import gradio as gr @@ -110,8 +111,47 @@ class State: id_live_preview = 0 textinfo = None time_start = None - need_restart = False server_start = None + _server_command_signal = threading.Event() + _server_command: str | None = None + + @property + def need_restart(self) -> bool: + # Compatibility getter for need_restart. 
+ return self.server_command == "restart" + + @need_restart.setter + def need_restart(self, value: bool) -> None: + # Compatibility setter for need_restart. + if value: + self.server_command = "restart" + + @property + def server_command(self): + return self._server_command + + @server_command.setter + def server_command(self, value: str | None) -> None: + """ + Set the server command to `value` and signal that it's been set. + """ + self._server_command = value + self._server_command_signal.set() + + def wait_for_server_command(self, timeout: float | None = None) -> str | None: + """ + Wait for server command to get set; return and clear the value and signal. + """ + if self._server_command_signal.wait(timeout): + self._server_command_signal.clear() + req = self._server_command + self._server_command = None + return req + return None + + def request_restart(self) -> None: + self.interrupt() + self.server_command = True def skip(self): self.skipped = True diff --git a/modules/ui.py b/modules/ui.py index 8e51e782..bed8464e 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1609,12 +1609,8 @@ def create_ui(): outputs=[] ) - def request_restart(): - shared.state.interrupt() - shared.state.need_restart = True - restart_gradio.click( - fn=request_restart, + fn=shared.state.request_restart, _js='restart_reload', inputs=[], outputs=[], diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index d7a0f685..4ba3bdd7 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -52,9 +52,7 @@ def apply_and_restart(disable_list, update_list, disable_all): shared.opts.disabled_extensions = disabled shared.opts.disable_all_extensions = disable_all shared.opts.save(shared.config_filename) - - shared.state.interrupt() - shared.state.need_restart = True + shared.state.request_restart() def save_config_state(name): @@ -92,8 +90,7 @@ def restore_config_state(confirmed, config_state_name, restore_type): if restore_type == "webui" or restore_type == "both": config_states.restore_webui_config(config_state) - shared.state.interrupt() - shared.state.need_restart = True + shared.state.request_restart() return "" -- cgit v1.2.1 From 875990a23213c63c19b8fdd3c87345f7a8ea2ceb Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Tue, 16 May 2023 20:58:35 +0300 Subject: Add option for /_stop route (for graceful shutdown) --- modules/cmd_args.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/cmd_args.py b/modules/cmd_args.py index f4a4ab36..6144db5c 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -103,3 +103,4 @@ parser.add_argument("--skip-version-check", action='store_true', help="Do not ch parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') +parser.add_argument('--add-stop-route', action='store_true', help='add /_stop route to stop server') -- cgit v1.2.1 From 315f109427a5dbbf48b0da560640523800fe3c1d Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Wed, 17 May 2023 10:26:32 +0300 Subject: Copy s_min_uncond to Processed Should fix #10416 --- modules/processing.py | 1 + 1 file changed, 1 insertion(+) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index 
678c4468..cd63b9a6 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -316,6 +316,7 @@ class Processed: self.s_tmin = p.s_tmin self.s_tmax = p.s_tmax self.s_noise = p.s_noise + self.s_min_uncond = p.s_min_uncond self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0] self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0] -- cgit v1.2.1 From 7a13a3f4ba86dc44fcf7d9944b179018744862f5 Mon Sep 17 00:00:00 2001 From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com> Date: Wed, 17 May 2023 17:39:07 +0800 Subject: TAESD fix --- modules/sd_samplers_common.py | 9 +++++---- modules/sd_vae_taesd.py | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index ceda6a35..d99c933d 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -35,13 +35,14 @@ def single_sample_to_image(sample, approximation=None): elif approximation == 1: x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() elif approximation == 3: - x_sample = sd_vae_taesd.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() - x_sample = sd_vae_taesd.TAESD.unscale_latents(x_sample) # returns value in [-2, 2] - x_sample = x_sample * 0.5 + x_sample = sample * 1.5 + x_sample = sd_vae_taesd.model()(x_sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() else: x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] - x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0) + if approximation != 3: + x_sample = (x_sample + 1.0) / 2.0 + x_sample = torch.clamp(x_sample, min=0.0, max=1.0) x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) diff --git a/modules/sd_vae_taesd.py b/modules/sd_vae_taesd.py index d23812ef..5e8496e8 100644 --- a/modules/sd_vae_taesd.py +++ b/modules/sd_vae_taesd.py @@ -45,7 +45,7 @@ def decoder(): class TAESD(nn.Module): - latent_magnitude = 2 + latent_magnitude = 3 latent_shift = 0.5 def __init__(self, decoder_path="taesd_decoder.pth"): -- cgit v1.2.1 From 1210548cba9dbd78378a710d75601922addefca2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 14:53:39 +0300 Subject: simplify single_sample_to_image --- modules/sd_samplers_common.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'modules') diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index d99c933d..763829f1 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -26,22 +26,19 @@ approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": def single_sample_to_image(sample, approximation=None): - if approximation is None: approximation = approximation_indexes.get(opts.show_progress_type, 0) if approximation == 2: - x_sample = sd_vae_approx.cheap_approximation(sample) + x_sample = sd_vae_approx.cheap_approximation(sample) * 0.5 + 0.5 elif approximation == 1: - x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() + x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() * 0.5 + 0.5 elif approximation == 3: x_sample = sample * 1.5 x_sample = sd_vae_taesd.model()(x_sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() else: - x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] + x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] * 0.5 + 0.5 - if approximation != 3: - x_sample = (x_sample + 1.0) / 2.0 x_sample = torch.clamp(x_sample, min=0.0, max=1.0) x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) -- cgit v1.2.1 From 95cb492e4106646450480ec74014c3ec9a679c1f Mon Sep 17 00:00:00 2001 From: Weiming Date: Wed, 17 May 2023 21:25:50 +0800 Subject: Fixed: #10460 --- modules/extras.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/extras.py b/modules/extras.py index bdf9b3b7..aabb3b26 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -242,9 +242,10 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ shared.state.textinfo = "Saving" print(f"Saving to {output_modelname}...") - metadata = {"format": "pt", "sd_merge_models": {}, "sd_merge_recipe": None} + metadata = None if save_metadata: + metadata = {"format": "pt", "sd_merge_models": {}} merge_recipe = { "type": "webui", # indicate this model was merged with webui's built-in merger "primary_model_hash": primary_model_info.sha256, -- cgit v1.2.1 From 76ebf750a463bc24434048dc60e289b0b6198598 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 17:44:07 +0300 Subject: use a local variable instead of dictionary entry for sd_merge_models in merge model metadata code --- modules/extras.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'modules') diff --git a/modules/extras.py b/modules/extras.py index aabb3b26..830b53aa 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -245,7 +245,8 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ metadata = None if save_metadata: - metadata = {"format": "pt", "sd_merge_models": {}} + metadata = {"format": "pt"} + merge_recipe = { "type": "webui", # indicate this model was merged with webui's built-in merger "primary_model_hash": primary_model_info.sha256, @@ -263,15 +264,17 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ } metadata["sd_merge_recipe"] = json.dumps(merge_recipe) + sd_merge_models = {} + def add_model_metadata(checkpoint_info): checkpoint_info.calculate_shorthash() - metadata["sd_merge_models"][checkpoint_info.sha256] = { + sd_merge_models[checkpoint_info.sha256] = { "name": checkpoint_info.name, "legacy_hash": checkpoint_info.hash, "sd_merge_recipe": checkpoint_info.metadata.get("sd_merge_recipe", None) } - metadata["sd_merge_models"].update(checkpoint_info.metadata.get("sd_merge_models", {})) + sd_merge_models.update(checkpoint_info.metadata.get("sd_merge_models", {})) add_model_metadata(primary_model_info) if secondary_model_info: @@ -279,7 +282,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_ if tertiary_model_info: add_model_metadata(tertiary_model_info) - metadata["sd_merge_models"] = json.dumps(metadata["sd_merge_models"]) + metadata["sd_merge_models"] = json.dumps(sd_merge_models) _, extension = os.path.splitext(output_modelname) if extension.lower() == ".safetensors": -- cgit v1.2.1 From f5092164e8e452debc889475dbaa163e4f877fda Mon Sep 17 00:00:00 2001 From: Iheuzio <97270760+Iheuzio@users.noreply.github.com> Date: Wed, 17 May 2023 12:51:54 -0400 Subject: Fix typo in syntax --- modules/ui_extra_networks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 8c3dea56..4f754f43 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -172,7 +172,7 @@ class ExtraNetworksPage: # if this is true, the item must not be show 
in the default view, and must instead only be # shown when searching for it - serach_only = "/." in local_path or "\\." in local_path + search_only = "/." in local_path or "\\." in local_path args = { "style": f"'display: none; {height}{width}{background_image}'", @@ -185,7 +185,7 @@ class ExtraNetworksPage: "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {json.dumps(tabname)}, {json.dumps(item["local_preview"])})""") + '"', "search_term": item.get("search_term", ""), "metadata_button": metadata_button, - "serach_only": " search_only" if serach_only else "", + "search_only": " search_only" if search_only else "", } return self.card_page.format(**args) -- cgit v1.2.1 From 9fd6c1e3430f5947add23e2e94ac816c2546481c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 20:22:38 +0300 Subject: move some settings to the new Optimization page add slider for token merging for img2img rework StableDiffusionProcessing to have the token_merging_ratio field fix a bug with applying png optimizations for live previews when they shouldn't be applied --- modules/processing.py | 52 +++++++++++++++++++++++++-------------------------- modules/progress.py | 6 +++++- modules/sd_models.py | 36 +++++++++++++++++++---------------- modules/shared.py | 8 ++++++-- 4 files changed, 56 insertions(+), 46 deletions(-) (limited to 'modules') diff --git a/modules/processing.py b/modules/processing.py index cd63b9a6..2b8dd361 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -29,12 +29,6 @@ from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion from einops import repeat, rearrange from blendmodes.blend import blendLayers, BlendType -import tomesd - -# add a logger for the processing module -logger = logging.getLogger(__name__) -# manually set output level here since there is no option to do so yet through launch options -# logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(name)s %(message)s') # some of those options should not be changed at all because they would break the model, so I removed them from options. 
@@ -156,6 +150,8 @@ class StableDiffusionProcessing: self.override_settings_restore_afterwards = override_settings_restore_afterwards self.is_using_inpainting_conditioning = False self.disable_extra_networks = False + self.token_merging_ratio = 0 + self.token_merging_ratio_hr = 0 if not seed_enable_extras: self.subseed = -1 @@ -171,6 +167,7 @@ class StableDiffusionProcessing: self.all_subseeds = None self.iteration = 0 self.is_hr_pass = False + self.sampler = None @property @@ -280,6 +277,12 @@ class StableDiffusionProcessing: def close(self): self.sampler = None + def get_token_merging_ratio(self, for_hr=False): + if for_hr: + return self.token_merging_ratio_hr or opts.token_merging_ratio_hr or self.token_merging_ratio or opts.token_merging_ratio + + return self.token_merging_ratio or opts.token_merging_ratio + class Processed: def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""): @@ -309,6 +312,8 @@ class Processed: self.styles = p.styles self.job_timestamp = state.job_timestamp self.clip_skip = opts.CLIP_stop_at_last_layers + self.token_merging_ratio = p.token_merging_ratio + self.token_merging_ratio_hr = p.token_merging_ratio_hr self.eta = p.eta self.ddim_discretize = p.ddim_discretize @@ -367,6 +372,9 @@ class Processed: def infotext(self, p: StableDiffusionProcessing, index): return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size) + def get_token_merging_ratio(self, for_hr=False): + return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio + # from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3 def slerp(val, low, high): @@ -480,6 +488,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) enable_hr = getattr(p, 'enable_hr', False) + token_merging_ratio = p.get_token_merging_ratio() + token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True) uses_ensd = opts.eta_noise_seed_delta != 0 if uses_ensd: @@ -502,8 +512,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": opts.eta_noise_seed_delta if uses_ensd else None, - "Token merging ratio": None if opts.token_merging_ratio == 0 else opts.token_merging_ratio, - "Token merging ratio hr": None if not enable_hr or opts.token_merging_ratio_hr == 0 else opts.token_merging_ratio_hr, + "Token merging ratio": None if token_merging_ratio == 0 else token_merging_ratio, + "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, @@ -536,17 +546,12 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_vae': sd_vae.reload_vae_weights() - if opts.token_merging_ratio > 0: - sd_models.apply_token_merging(sd_model=p.sd_model, hr=False) - logger.debug(f"Token merging applied to first pass. 
Ratio: '{opts.token_merging_ratio}'") + sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio()) res = process_images_inner(p) finally: - # undo model optimizations made by tomesd - if opts.token_merging_ratio > 0: - tomesd.remove_patch(p.sd_model) - logger.debug('Token merging model optimizations removed') + sd_models.apply_token_merging(p.sd_model, 0) # restore opts to original state if p.override_settings_restore_afterwards: @@ -996,21 +1001,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): x = None devices.torch_gc() - # apply token merging optimizations from tomesd for high-res pass - if opts.token_merging_ratio_hr > 0: - # in case the user has used separate merge ratios - if opts.token_merging_ratio > 0: - tomesd.remove_patch(self.sd_model) - logger.debug('Adjusting token merging ratio for high-res pass') - - sd_models.apply_token_merging(sd_model=self.sd_model, hr=True) - logger.debug(f"Applied token merging for high-res pass. Ratio: '{opts.token_merging_ratio_hr}'") + sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio(for_hr=True)) samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) - if opts.token_merging_ratio_hr > 0 or opts.token_merging_ratio > 0: - tomesd.remove_patch(self.sd_model) - logger.debug('Removed token merging optimizations from model') + sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio()) self.is_hr_pass = False @@ -1173,3 +1168,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): devices.torch_gc() return samples + + def get_token_merging_ratio(self, for_hr=False): + return self.token_merging_ratio or ("token_merging_ratio" in self.override_settings and opts.token_merging_ratio) or opts.token_merging_ratio_img2img or opts.token_merging_ratio diff --git a/modules/progress.py b/modules/progress.py index 269863c9..f405f07f 100644 --- a/modules/progress.py +++ b/modules/progress.py @@ -98,7 +98,11 @@ def progressapi(req: ProgressRequest): if opts.live_previews_image_format == "png": # using optimize for large images takes an enormous amount of time - save_kwargs = {"optimize": max(*image.size) > 256} + if max(*image.size) <= 256: + save_kwargs = {"optimize": True} + else: + save_kwargs = {"optimize": False, "compress_level": 1} + else: save_kwargs = {} diff --git a/modules/sd_models.py b/modules/sd_models.py index e612be10..4bd8783e 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -583,23 +583,27 @@ def unload_model_weights(sd_model=None, info=None): return sd_model -def apply_token_merging(sd_model, hr: bool): +def apply_token_merging(sd_model, token_merging_ratio): """ Applies speed and memory optimizations from tomesd. 
- - Args: - hr (bool): True if called in the context of a high-res pass """ - ratio = shared.opts.token_merging_ratio - if hr: - ratio = shared.opts.token_merging_ratio_hr - - tomesd.apply_patch( - sd_model, - ratio=ratio, - use_rand=False, # can cause issues with some samplers - merge_attn=True, - merge_crossattn=False, - merge_mlp=False - ) + current_token_merging_ratio = getattr(sd_model, 'applied_token_merged_ratio', 0) + + if current_token_merging_ratio == token_merging_ratio: + return + + if current_token_merging_ratio > 0: + tomesd.remove_patch(sd_model) + + if token_merging_ratio > 0: + tomesd.apply_patch( + sd_model, + ratio=token_merging_ratio, + use_rand=False, # can cause issues with some samplers + merge_attn=True, + merge_crossattn=False, + merge_mlp=False + ) + + sd_model.applied_token_merged_ratio = token_merging_ratio diff --git a/modules/shared.py b/modules/shared.py index 47bc6d0e..76af8b9c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -413,8 +413,13 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP nrtwork; 1 ignores none, 2 ignores one layer"), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"), "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different vidocard vendors"), +})) + +options_templates.update(options_section(('optimizations', "Optimizations"), { + "s_min_uncond": OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), - "token_merging_ratio_hr": OptionInfo(0.0, "Togen merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}), + "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), + "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), })) options_templates.update(options_section(('compatibility', "Compatibility"), { @@ -498,7 +503,6 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; higher = more unperdictable results"), "eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}).info("noise multiplier; applies to Euler a and other samplers that have a in them"), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), - 's_min_uncond': OptionInfo(0, "Negative Guidance minimum sigma", 
gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), -- cgit v1.2.1 From a6b618d07248267de36f0e8f4a847d997285e272 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 21:03:41 +0300 Subject: use a single function for saving images with metadata both in extra networks and main mode for #10395 --- modules/images.py | 70 ++++++++++++++++++++++++++------------------ modules/ui_extra_networks.py | 19 ++---------- 2 files changed, 43 insertions(+), 46 deletions(-) (limited to 'modules') diff --git a/modules/images.py b/modules/images.py index b2de3662..4e8cd993 100644 --- a/modules/images.py +++ b/modules/images.py @@ -482,6 +482,43 @@ def get_next_sequence_number(path, basename): return result + 1 +def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_pnginfo=None): + if extension is None: + extension = os.path.splitext(filename)[1] + + image_format = Image.registered_extensions()[extension] + + existing_pnginfo = existing_pnginfo or {} + if opts.enable_pnginfo: + existing_pnginfo['parameters'] = geninfo + + if extension.lower() == '.png': + pnginfo_data = PngImagePlugin.PngInfo() + for k, v in (existing_pnginfo or {}).items(): + pnginfo_data.add_text(k, str(v)) + + image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data) + + elif extension.lower() in (".jpg", ".jpeg", ".webp"): + if image.mode == 'RGBA': + image = image.convert("RGB") + elif image.mode == 'I;16': + image = image.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L") + + image.save(filename, format=image_format, quality=opts.jpeg_quality, lossless=opts.webp_lossless) + + if opts.enable_pnginfo and geninfo is not None: + exif_bytes = piexif.dump({ + "Exif": { + piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or "", encoding="unicode") + }, + }) + + piexif.insert(exif_bytes, filename) + else: + image.save(filename, format=image_format, quality=opts.jpeg_quality) + + def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None): """Save an image. 
@@ -566,38 +603,13 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i info = params.pnginfo.get(pnginfo_section_name, None) def _atomically_save_image(image_to_save, filename_without_extension, extension): - # save image with .tmp extension to avoid race condition when another process detects new image in the directory + """ + save image with .tmp extension to avoid race condition when another process detects new image in the directory + """ temp_file_path = f"{filename_without_extension}.tmp" - image_format = Image.registered_extensions()[extension] - - if extension.lower() == '.png': - pnginfo_data = PngImagePlugin.PngInfo() - if opts.enable_pnginfo: - for k, v in params.pnginfo.items(): - pnginfo_data.add_text(k, str(v)) - - image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data) - elif extension.lower() in (".jpg", ".jpeg", ".webp"): - if image_to_save.mode == 'RGBA': - image_to_save = image_to_save.convert("RGB") - elif image_to_save.mode == 'I;16': - image_to_save = image_to_save.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L") - - image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, lossless=opts.webp_lossless) - - if opts.enable_pnginfo and info is not None: - exif_bytes = piexif.dump({ - "Exif": { - piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode") - }, - }) - - piexif.insert(exif_bytes, temp_file_path) - else: - image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality) + save_image_with_geninfo(image_to_save, info, temp_file_path, extension, params.pnginfo) - # atomically rename the file with correct extension os.replace(temp_file_path, filename_without_extension + extension) fullfn_without_extension, extension = os.path.splitext(params.filename) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 471df23b..c6e45fb1 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -4,7 +4,7 @@ from pathlib import Path from PIL import PngImagePlugin from modules import shared -from modules.images import read_info_from_image +from modules.images import read_info_from_image, save_image_with_geninfo import gradio as gr import json import html @@ -343,22 +343,7 @@ def setup_ui(ui, gallery): assert is_allowed, f'writing to {filename} is not allowed' - if geninfo: - ext = os.path.splitext(filename)[1].lower() - if ext == '.png': - pnginfo_data = PngImagePlugin.PngInfo() - pnginfo_data.add_text('parameters', geninfo) - image.save(filename, pnginfo=pnginfo_data) - elif ext in ('.jpg', '.jpeg', '.webp'): - exif_bytes = piexif.dump({ - 'Exif': {piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or '', - encoding='unicode')} - }) - image.save(filename, exif=exif_bytes, quality=shared.opts.jpeg_quality) - else: - image.save(filename) - else: - image.save(filename) + save_image_with_geninfo(image, geninfo, filename) return [page.create_html(ui.tabname) for page in ui.stored_extra_pages] -- cgit v1.2.1 From 8fe9ea7f4d8fd76038db61e1fd2b1cc5927ce108 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 21:45:26 +0300 Subject: add options to show/hide hidden files and dirs, and to not list models/files in hidden directories --- modules/shared.py | 6 ++++++ modules/ui_extra_networks.py | 17 +++++++++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) (limited to 'modules') diff --git 
a/modules/shared.py b/modules/shared.py index 76af8b9c..23563582 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -378,6 +378,7 @@ options_templates.update(options_section(('system', "System"), { "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."), + "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""), })) options_templates.update(options_section(('training', "Training"), { @@ -446,6 +447,8 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"), })) options_templates.update(options_section(('extra_networks', "Extra Networks"), { + "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."), + "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'), "extra_networks_default_view": OptionInfo("cards", "Default view for Extra Networks", gr.Dropdown, {"choices": ["cards", "thumbs"]}), "extra_networks_default_multiplier": OptionInfo(1.0, "Multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"), @@ -825,4 +828,7 @@ def walk_files(path, allowed_extensions=None): if ext not in allowed_extensions: continue + if not opts.list_hidden_files and ("/." in root or "\\." in root): + continue + yield os.path.join(root, filename) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index c6e45fb1..8669cc1a 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -105,6 +105,9 @@ class ExtraNetworksPage: if not is_empty and not subdir.endswith("/"): subdir = subdir + "/" + if ("/." in subdir or subdir.startswith(".")) and not shared.opts.extra_networks_show_hidden_directories: + continue + subdirs[subdir] = 1 if subdirs: @@ -147,6 +150,10 @@ class ExtraNetworksPage: return [] def create_html_for_item(self, item, tabname): + """ + Create HTML for card item in tab tabname; can return empty string if the item is not meant to be shown. + """ + preview = item.get("preview", None) onclick = item.get("onclick", None) @@ -169,9 +176,15 @@ class ExtraNetworksPage: if filename.startswith(absdir): local_path = filename[len(absdir):] - # if this is true, the item must not be show in the default view, and must instead only be + # if this is true, the item must not be shown in the default view, and must instead only be # shown when searching for it - search_only = "/." in local_path or "\\." in local_path + if shared.opts.extra_networks_hidden_models == "Always": + search_only = False + else: + search_only = "/." in local_path or "\\." 
in local_path + + if search_only and shared.opts.extra_networks_hidden_models == "Never": + return "" args = { "style": f"'display: none; {height}{width}{background_image}'", -- cgit v1.2.1 From f6fc7916c4615c4f5cac97cab5add9b0536f6efa Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 22:43:24 +0300 Subject: add /sdapi/v1/script-info api --- modules/api/api.py | 13 +++++++++++-- modules/api/models.py | 21 +++++++++++++++++++-- modules/scripts.py | 29 ++++++++++++++++++++++++++++- 3 files changed, 58 insertions(+), 5 deletions(-) (limited to 'modules') diff --git a/modules/api/api.py b/modules/api/api.py index 165985c3..eee99bbb 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -204,6 +204,7 @@ class Api: self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"]) self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"]) self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList) + self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo]) self.default_script_arg_txt2img = [] self.default_script_arg_img2img = [] @@ -229,11 +230,19 @@ class Api: return script, script_idx def get_scripts_list(self): - t2ilist = [str(title.lower()) for title in scripts.scripts_txt2img.titles] - i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles] + t2ilist = [script.name for script in scripts.scripts_txt2img.scripts if script.name is not None] + i2ilist = [script.name for script in scripts.scripts_img2img.scripts if script.name is not None] return models.ScriptsList(txt2img=t2ilist, img2img=i2ilist) + def get_script_info(self): + res = [] + + for script_list in [scripts.scripts_txt2img.scripts, scripts.scripts_img2img.scripts]: + res += [script.api_info for script in script_list if script.api_info is not None] + + return res + def get_script(self, script_name, script_runner): if script_name is None or script_name == "": return None, None diff --git a/modules/api/models.py b/modules/api/models.py index 006ccdb7..1ff2fb33 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -287,6 +287,23 @@ class MemoryResponse(BaseModel): ram: dict = Field(title="RAM", description="System memory stats") cuda: dict = Field(title="CUDA", description="nVidia CUDA memory stats") + class ScriptsList(BaseModel): - txt2img: list = Field(default=None,title="Txt2img", description="Titles of scripts (txt2img)") - img2img: list = Field(default=None,title="Img2img", description="Titles of scripts (img2img)") + txt2img: list = Field(default=None, title="Txt2img", description="Titles of scripts (txt2img)") + img2img: list = Field(default=None, title="Img2img", description="Titles of scripts (img2img)") + + +class ScriptArg(BaseModel): + label: str = Field(default=None, title="Label", description="Name of the argument in UI") + value: Optional[Any] = Field(default=None, title="Value", description="Default value of the argument") + minimum: Optional[Any] = Field(default=None, title="Minimum", description="Minimum allowed value for the argumentin UI") + maximum: Optional[Any] = Field(default=None, title="Minimum", description="Maximum allowed value for the argumentin UI") + step: Optional[Any] = Field(default=None, title="Minimum", description="Step for changing value of the argumentin UI") + choices: Optional[List[str]] = Field(default=None, title="Choices", description="Possible values for the 
argument") + + +class ScriptInfo(BaseModel): + name: str = Field(default=None, title="Name", description="Script name") + is_alwayson: bool = Field(default=None, title="IsAlwayson", description="Flag specifying whether this script is an alwayson script") + is_img2img: bool = Field(default=None, title="IsImg2img", description="Flag specifying whether this script is an img2img script") + args: List[ScriptArg] = Field(title="Arguments", description="List of script's arguments") diff --git a/modules/scripts.py b/modules/scripts.py index 0c12ebd5..e33d8c81 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -17,6 +17,9 @@ class PostprocessImageArgs: class Script: + name = None + """script's internal name derived from title""" + filename = None args_from = None args_to = None @@ -25,8 +28,8 @@ class Script: is_txt2img = False is_img2img = False - """A gr.Group component that has all script's UI inside it""" group = None + """A gr.Group component that has all script's UI inside it""" infotext_fields = None """if set in ui(), this is a list of pairs of gradio component + text; the text will be used when @@ -38,6 +41,9 @@ class Script: various "Send to " buttons when clicked """ + api_info = None + """Generated value of type modules.api.models.ScriptInfo with information about the script for API""" + def title(self): """this function should return the title of the script. This is what will be displayed in the dropdown menu.""" @@ -313,6 +319,8 @@ class ScriptRunner: self.selectable_scripts.append(script) def setup_ui(self): + import modules.api.models as api_models + self.titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.selectable_scripts] inputs = [None] @@ -327,9 +335,28 @@ class ScriptRunner: if controls is None: return + script.name = wrap_call(script.title, script.filename, "title", default=script.filename).lower() + api_args = [] + for control in controls: control.custom_script_source = os.path.basename(script.filename) + arg_info = api_models.ScriptArg(label=control.label or "") + + for field in ("value", "minimum", "maximum", "step", "choices"): + v = getattr(control, field, None) + if v is not None: + setattr(arg_info, field, v) + + api_args.append(arg_info) + + script.api_info = api_models.ScriptInfo( + name=script.name, + is_img2img=script.is_img2img, + is_alwayson=script.alwayson, + args=api_args, + ) + if script.infotext_fields is not None: self.infotext_fields += script.infotext_fields -- cgit v1.2.1 From ad3a7f2ab9bb459ba86bcfb9041c5dbbac7841ee Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 22:50:08 +0300 Subject: alternative solution to fix styles load when edited by human #9765 as suggested by akx --- modules/styles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'modules') diff --git a/modules/styles.py b/modules/styles.py index c22769cf..34e1b5e1 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -43,7 +43,7 @@ class StyleDatabase: return with open(self.path, "r", encoding="utf-8-sig", newline='') as file: - reader = csv.DictReader(file) + reader = csv.DictReader(file, skipinitialspace=True) for row in reader: # Support loading old CSV format with "name, text"-columns prompt = row["prompt"] if "prompt" in row else row["text"] -- cgit v1.2.1 From b397f63e00bbfbe9087d80abb457aa9a593b181b Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 17 May 2023 23:11:33 +0300 Subject: add option to reorder tabs fix Reload UI not working 
--- modules/shared.py | 3 ++- modules/ui.py | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'modules') diff --git a/modules/shared.py b/modules/shared.py index 23563582..332cf1cf 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -151,7 +151,7 @@ class State: def request_restart(self) -> None: self.interrupt() - self.server_command = True + self.server_command = "restart" def skip(self): self.skipped = True @@ -478,6 +478,7 @@ options_templates.update(options_section(('ui', "User interface"), { "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing ", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"), "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_restart(), + "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(), "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_restart(), diff --git a/modules/ui.py b/modules/ui.py index bed8464e..a47af214 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1644,7 +1644,10 @@ def create_ui(): parameters_copypaste.connect_paste_params_buttons() with gr.Tabs(elem_id="tabs") as tabs: - for interface, label, ifid in interfaces: + tab_order = {k: i for i, k in enumerate(opts.ui_tab_order)} + sorted_interfaces = sorted(interfaces, key=lambda x: tab_order.get(x[1], 9999)) + + for interface, label, ifid in sorted_interfaces: if label in shared.opts.hidden_tabs: continue with gr.TabItem(label, id=ifid, elem_id=f"tab_{ifid}"): -- cgit v1.2.1
PathOld valueNew value
{path}{old_value}{new_value}
No changes
{html.escape(description)}

Added: {html.escape(added)}

{install_code}
Extension URLVersionBranchVersionDate Update
{html.escape(ext.name)} {remote}{ext.branch} {version_link}{time.asctime(time.gmtime(ext.commit_date))}