From d587586d3be2de061238defb8a556f03743287f6 Mon Sep 17 00:00:00 2001
From: mawr
Date: Mon, 31 Oct 2022 00:14:07 +0300
Subject: Added "--clip-models-path" switch to avoid using default "~/.cache/clip" and enable to run under unprivileged user without homedir

---
 modules/interrogate.py | 4 ++--
 modules/shared.py      | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'modules')

diff --git a/modules/interrogate.py b/modules/interrogate.py
index 65b05d34..9769aa34 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -56,9 +56,9 @@ class InterrogateModels:
         import clip
 
         if self.running_on_cpu:
-            model, preprocess = clip.load(clip_model_name, device="cpu")
+            model, preprocess = clip.load(clip_model_name, device="cpu", download_root=shared.cmd_opts.clip_models_path)
         else:
-            model, preprocess = clip.load(clip_model_name)
+            model, preprocess = clip.load(clip_model_name, download_root=shared.cmd_opts.clip_models_path)
 
         model.eval()
         model = model.to(devices.device_interrogate)
diff --git a/modules/shared.py b/modules/shared.py
index e4f163c1..36212031 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -51,6 +51,7 @@ parser.add_argument("--realesrgan-models-path", type=str, help="Path to director
 parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
 parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
 parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
+parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
 parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
 parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
 parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
--
cgit v1.2.1
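Note: the switch above only changes where the CLIP interrogator weights are cached. A minimal sketch of the behaviour being relied on, assuming the openai CLIP package; the directory value is an illustrative example, not something taken from the patch:

    import clip

    # Example stand-in for --clip-models-path; any directory writable by the
    # (possibly homedir-less) service account works.
    models_dir = "/srv/sd-webui/models/CLIP"

    # download_root=None (the argparse default above) keeps the stock ~/.cache/clip
    # location; an explicit path redirects both the download and the cache lookup.
    model, preprocess = clip.load("ViT-L/14", device="cpu", download_root=models_dir)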
From 21fba39c609859a60616420afda3b34a89e00761 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 30 Oct 2022 23:45:52 +0000
Subject: Add callbacks and param objects

---
 modules/script_callbacks.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

(limited to 'modules')

diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index 6ea58d61..a206ea59 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -24,12 +24,22 @@ class ImageSaveParams:
         """dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'"""
 
 
+class CGFDenoiserParams:
+    def __init__(self, x_in, image_cond_in, sigma_in, sampling_step, total_sampling_steps):
+        self.x_in = x_in
+        self.image_cond_in = image_cond_in
+        self.sigma_in = sigma_in
+        self.sampling_step = sampling_step
+        self.total_sampling_steps = total_sampling_steps
+
+
 ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
 callbacks_model_loaded = []
 callbacks_ui_tabs = []
 callbacks_ui_settings = []
 callbacks_before_image_saved = []
 callbacks_image_saved = []
+callbacks_cfg_denoiser = []
 
 
 def clear_callbacks():
@@ -84,6 +94,14 @@ def image_saved_callback(params: ImageSaveParams):
             report_exception(c, 'image_saved_callback')
 
 
+def cfg_denoiser_callback(params: CGFDenoiserParams):
+    for c in callbacks_cfg_denoiser:
+        try:
+            c.callback(params)
+        except Exception:
+            report_exception(c, 'cfg_denoiser_callback')
+
+
 def add_callback(callbacks, fun):
     stack = [x for x in inspect.stack() if x.filename != __file__]
     filename = stack[0].filename if len(stack) > 0 else 'unknown file'
@@ -130,3 +148,12 @@ def on_image_saved(callback):
     - params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing.
     """
     add_callback(callbacks_image_saved, callback)
+
+
+def on_cfg_denoiser(callback):
+    """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
+    The callback is called with one argument:
+        - params: CGFDenoiserParams - parameters to be passed to the inner model and sampling state details.
+    """
+    add_callback(callbacks_cfg_denoiser, callback)
+
--
cgit v1.2.1

From 8906be85ac91310b37dccddc44f23631eb7a15f5 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 30 Oct 2022 23:47:08 +0000
Subject: add callback cleardown

---
 modules/script_callbacks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules')

diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index a206ea59..b0b8dc47 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -48,7 +48,7 @@ def clear_callbacks():
     callbacks_ui_settings.clear()
     callbacks_before_image_saved.clear()
     callbacks_image_saved.clear()
-
+    callbacks_cfg_denoiser.clear()
 
 def model_loaded_callback(sd_model):
     for c in callbacks_model_loaded:
--
cgit v1.2.1

From 8ae0ea9deaa5a09d1e0aa8b2f8e97c38d71cdbda Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 30 Oct 2022 23:48:33 +0000
Subject: Add callback to sd_samplers

---
 modules/sd_samplers.py | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'modules')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 3670b57d..30cb5c4b 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -11,6 +11,7 @@ from modules import prompt_parser, devices, processing, images
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
 
+from modules.script_callbacks import CGFDenoiserParams, cfg_denoiser_callback
 
 SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
 
@@ -278,6 +279,8 @@ class CFGDenoiser(torch.nn.Module):
         image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
         sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
 
+        cfg_denoiser_callback(CGFDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps))
+
         if tensor.shape[1] == uncond.shape[1]:
             cond_in = torch.cat([tensor, uncond])
 
--
cgit v1.2.1
From adaa699e3888e1396162083d65c63cd6774cc6b0 Mon Sep 17 00:00:00 2001
From: evshiron
Date: Sun, 30 Oct 2022 18:08:40 +0800
Subject: prototype interrupt api

---
 modules/api/api.py | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'modules')

diff --git a/modules/api/api.py b/modules/api/api.py
index 6c06d449..e702c9c0 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -40,6 +40,7 @@ class Api:
         self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse)
         self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
         self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
+        self.app.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
 
     def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
         sampler_index = sampler_to_index(txt2imgreq.sampler_index)
@@ -176,6 +177,11 @@ class Api:
 
         return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image)
 
+    def interruptapi(self):
+        shared.state.interrupt()
+
+        return {}
+
     def launch(self, server_name, port):
         self.app.include_router(self.router)
         uvicorn.run(self.app, host=server_name, port=port)
--
cgit v1.2.1
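Note: a client-side sketch of the new endpoint, assuming the API is served on the default local address and that the requests package is installed; the prototype handler just flags the shared state as interrupted and returns an empty JSON object.

    import requests

    # Hypothetical base URL; adjust host/port to wherever the webui API listens.
    base_url = "http://127.0.0.1:7860"

    resp = requests.post(f"{base_url}/sdapi/v1/interrupt")
    resp.raise_for_status()
    print(resp.json())  # the handler above returns {}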
From 29f758afe9c09834a1cd2dac3f5dd6fbf7dfeaa7 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Mon, 31 Oct 2022 02:39:55 +0000
Subject: Extend extras image cache with upscale_first arg

---
 modules/extras.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules')

diff --git a/modules/extras.py b/modules/extras.py
index 4d51088b..8e2ab35c 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -141,7 +141,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
                         upscaling_resize_w, upscaling_resize_h, upscaling_crop)
         cache_key = LruCache.Key(image_hash=hash(np.array(image.getdata()).tobytes()),
                                  info_hash=hash(info),
-                                 args_hash=hash(upscale_args))
+                                 args_hash=hash((upscale_args, upscale_first)))
         cached_entry = cached_images.get(cache_key)
         if cached_entry is None:
             res = upscale(image, *upscale_args)
--
cgit v1.2.1

From 006756f9cd6258eae418e9209cfc13f940ec53e1 Mon Sep 17 00:00:00 2001
From: Fampai <>
Date: Mon, 31 Oct 2022 07:26:08 -0400
Subject: Added TI training optimizations

option to use xattention optimizations when training
option to unload vae when training

---
 modules/shared.py                              | 3 ++-
 modules/textual_inversion/textual_inversion.py | 9 +++++++++
 modules/textual_inversion/ui.py                | 7 +++++--
 3 files changed, 16 insertions(+), 3 deletions(-)

(limited to 'modules')

diff --git a/modules/shared.py b/modules/shared.py
index fb84afd8..4c3d0ce7 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -256,11 +256,12 @@ options_templates.update(options_section(('system', "System"), {
 }))
 
 options_templates.update(options_section(('training', "Training"), {
-    "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM."),
+    "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
     "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
     "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
     "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
     "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
+    "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
 }))
 
 options_templates.update(options_section(('sd', "Stable Diffusion"), {
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 17dfb223..b0a1d26b 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -214,6 +214,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
     filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
 
     log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
+    unload = shared.opts.unload_models_when_training
 
     if save_embedding_every > 0:
         embedding_dir = os.path.join(log_directory, "embeddings")
@@ -238,6 +239,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
     shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
     with torch.autocast("cuda"):
         ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size)
+    if unload:
+        shared.sd_model.first_stage_model.to(devices.cpu)
 
     hijack = sd_hijack.model_hijack
 
@@ -303,6 +306,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
             if images_dir is not None and steps_done % create_image_every == 0:
                 forced_filename = f'{embedding_name}-{steps_done}'
                 last_saved_image = os.path.join(images_dir, forced_filename)
+
+                shared.sd_model.first_stage_model.to(devices.device)
+
                 p = processing.StableDiffusionProcessingTxt2Img(
                     sd_model=shared.sd_model,
                     do_not_save_grid=True,
@@ -330,6 +336,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
                 processed = processing.process_images(p)
                 image = processed.images[0]
 
+                if unload:
+                    shared.sd_model.first_stage_model.to(devices.cpu)
+
                 shared.state.current_image = image
 
                 if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py
index e712284d..d679e6f4 100644
--- a/modules/textual_inversion/ui.py
+++ b/modules/textual_inversion/ui.py
@@ -25,8 +25,10 @@ def train_embedding(*args):
 
     assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible'
 
+    apply_optimizations = shared.opts.training_xattention_optimizations
     try:
-        sd_hijack.undo_optimizations()
+        if not apply_optimizations:
+            sd_hijack.undo_optimizations()
 
        embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args)
 
@@ -38,5 +40,6 @@ Embedding saved to {html.escape(filename)}
     except Exception:
         raise
     finally:
-        sd_hijack.apply_optimizations()
+        if not apply_optimizations:
+            sd_hijack.apply_optimizations()
 
--
cgit v1.2.1
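Note: the unload option above amounts to parking the VAE (first_stage_model) on the CPU while the embedding trains and moving it back only when a preview image is generated. A generic, self-contained PyTorch sketch of that pattern with a toy module (illustrative only, not webui code):

    import torch

    class TinyVAE(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.decoder = torch.nn.Linear(4, 3)

        def forward(self, z):
            return self.decoder(z)

    vae = TinyVAE()
    device = "cuda" if torch.cuda.is_available() else "cpu"

    vae.to("cpu")      # training steps run with the module out of VRAM
    # ... training work that does not need the VAE ...
    vae.to(device)     # bring it back for a preview
    with torch.no_grad():
        preview = vae(torch.randn(1, 4, device=device))
    vae.to("cpu")      # park it again until the next preview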
From 890e68aaf75ae80d5eb2fa95b4bf1adf78b96881 Mon Sep 17 00:00:00 2001
From: Fampai <>
Date: Mon, 31 Oct 2022 10:07:12 -0400
Subject: Fixed minor bug when unloading vae during TI training,

generating images after training will error out

---
 modules/textual_inversion/textual_inversion.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'modules')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 54a734f1..0aeb0459 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -409,6 +409,7 @@ Last saved image: {html.escape(last_saved_image)}
         filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
         save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True)
 
+    shared.sd_model.first_stage_model.to(devices.device)
     return embedding, filename
--
cgit v1.2.1
From 8792be50078c2e89ac2fa8ff4e4c1ca82f140d45 Mon Sep 17 00:00:00 2001
From: timntorres
Date: Mon, 31 Oct 2022 17:29:04 -0700
Subject: Add PNG info to pngs only if option is enabled.

---
 modules/images.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'modules')

diff --git a/modules/images.py b/modules/images.py
index a0728553..ae705cbd 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -510,8 +510,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
 
         if extension.lower() == '.png':
             pnginfo_data = PngImagePlugin.PngInfo()
-            for k, v in params.pnginfo.items():
-                pnginfo_data.add_text(k, str(v))
+            if opts.enable_pnginfo:
+                for k, v in params.pnginfo.items():
+                    pnginfo_data.add_text(k, str(v))
 
             image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
--
cgit v1.2.1

From 198a1ffcfc963a3d74674fad560e87dbebf7949f Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 1 Nov 2022 19:13:59 +0300
Subject: fix API returning extra stuff in base64 encoded iamges for #3972

---
 modules/api/api.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'modules')

diff --git a/modules/api/api.py b/modules/api/api.py
index e702c9c0..bb87d795 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -1,6 +1,8 @@
+import base64
+import io
 import time
 import uvicorn
-from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image
+from gradio.processing_utils import decode_base64_to_file, decode_base64_to_image
 from fastapi import APIRouter, Depends, HTTPException
 import modules.shared as shared
 from modules import devices
@@ -29,6 +31,12 @@ def setUpscalers(req: dict):
     return reqDict
 
 
+def encode_pil_to_base64(image):
+    buffer = io.BytesIO()
+    image.save(buffer, format="png")
+    return base64.b64encode(buffer.getvalue())
+
+
 class Api:
     def __init__(self, app, queue_lock):
         self.router = APIRouter()
--
cgit v1.2.1
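Note: gradio's encoder wraps images as a data-URL-style string, which is presumably the "extra stuff" referenced in #3972; the replacement above returns bare base64-encoded PNG bytes. A hedged client-side sketch of decoding such a payload back into an image (assumes Pillow is installed):

    import base64
    import io

    from PIL import Image

    def decode_api_image(b64_png: str) -> Image.Image:
        # Reverse of the encode_pil_to_base64 helper above: bare base64 -> PNG bytes -> PIL image.
        return Image.open(io.BytesIO(base64.b64decode(b64_png)))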
From cd88e21dc5d5cfdfbd408454acd259b7db9d0ec8 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Wed, 2 Nov 2022 00:34:58 +0000
Subject: Class Name typo and add descriptions to fields.

---
 modules/script_callbacks.py | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

(limited to 'modules')

diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index b0b8dc47..ff40b056 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -24,13 +24,22 @@ class ImageSaveParams:
         """dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'"""
 
 
-class CGFDenoiserParams:
-    def __init__(self, x_in, image_cond_in, sigma_in, sampling_step, total_sampling_steps):
-        self.x_in = x_in
-        self.image_cond_in = image_cond_in
-        self.sigma_in = sigma_in
+class CFGDenoiserParams:
+    def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps):
+        self.x = x
+        """Latent image representation in the process of being denoised"""
+
+        self.image_cond = image_cond
+        """Conditioning image"""
+
+        self.sigma = sigma
+        """Current sigma noise step value"""
+
         self.sampling_step = sampling_step
+        """Current Sampling step number"""
+
         self.total_sampling_steps = total_sampling_steps
+        """Total number of sampling steps planned"""
 
 
 ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
@@ -94,7 +103,7 @@ def image_saved_callback(params: ImageSaveParams):
             report_exception(c, 'image_saved_callback')
 
 
-def cfg_denoiser_callback(params: CGFDenoiserParams):
+def cfg_denoiser_callback(params: CFGDenoiserParams):
     for c in callbacks_cfg_denoiser:
         try:
             c.callback(params)
         except Exception:
             report_exception(c, 'cfg_denoiser_callback')
@@ -153,7 +162,7 @@ def on_image_saved(callback):
 def on_cfg_denoiser(callback):
     """register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
     The callback is called with one argument:
-        - params: CGFDenoiserParams - parameters to be passed to the inner model and sampling state details.
+        - params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details.
     """
     add_callback(callbacks_cfg_denoiser, callback)
 
--
cgit v1.2.1

From 5b6bedf6f2ebacb7f1f5809af8e26a6a1af16e2a Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Wed, 2 Nov 2022 00:38:17 +0000
Subject: Update class name and assign back to vars

---
 modules/sd_samplers.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'modules')

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 30cb5c4b..ebc0d896 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -11,7 +11,7 @@ from modules import prompt_parser, devices, processing, images
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
 
-from modules.script_callbacks import CGFDenoiserParams, cfg_denoiser_callback
+from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
 
 SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
 
@@ -279,7 +279,11 @@ class CFGDenoiser(torch.nn.Module):
         image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
         sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
 
-        cfg_denoiser_callback(CGFDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps))
+        denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps)
+        cfg_denoiser_callback(denoiser_params)
+        x_in = denoiser_params.x
+        image_cond_in = denoiser_params.image_cond
+        sigma_in = denoiser_params.sigma
 
         if tensor.shape[1] == uncond.shape[1]:
             cond_in = torch.cat([tensor, uncond])
--
cgit v1.2.1
From c9148b2312b36fee8727f5233da9dbe32aa1f58c Mon Sep 17 00:00:00 2001
From: Jairo Correa
Date: Tue, 1 Nov 2022 21:56:47 -0300
Subject: Release processing resources after it finishes

---
 modules/img2img.py    | 2 ++
 modules/processing.py | 7 ++++---
 modules/txt2img.py    | 2 ++
 3 files changed, 8 insertions(+), 3 deletions(-)

(limited to 'modules')

diff --git a/modules/img2img.py b/modules/img2img.py
index 35c5df9b..fac010aa 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -137,6 +137,8 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
         if processed is None:
             processed = process_images(p)
 
+    p.close()
+
     shared.total_tqdm.clear()
 
     generation_info_js = processed.js()
diff --git a/modules/processing.py b/modules/processing.py
index 57d3a523..b541ee2b 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -202,6 +202,10 @@ class StableDiffusionProcessing():
     def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
         raise NotImplementedError()
 
+    def close(self):
+        self.sd_model = None
+        self.sampler = None
+
 
 class Processed:
     def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None):
@@ -597,9 +601,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
     if p.scripts is not None:
         p.scripts.postprocess(p, res)
 
-    p.sd_model = None
-    p.sampler = None
-
     return res
 
 
diff --git a/modules/txt2img.py b/modules/txt2img.py
index c9d5a090..8e4e8677 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -47,6 +47,8 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
     if processed is None:
         processed = process_images(p)
 
+    p.close()
+
     shared.total_tqdm.clear()
 
     generation_info_js = processed.js()
--
cgit v1.2.1

From 5510c282b1f1974005790066b5e444f74a5178fb Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 2 Nov 2022 07:26:31 +0300
Subject: fix for extensions' javascript not loading

---
 modules/ui.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'modules')

diff --git a/modules/ui.py b/modules/ui.py
index 2c15abb7..a94f46ea 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -671,6 +671,8 @@ def create_ui(wrap_gradio_gpu_call):
     import modules.img2img
     import modules.txt2img
 
+    reload_javascript()
+
     parameters_copypaste.reset()
 
     with gr.Blocks(analytics_enabled=False) as txt2img_interface:
@@ -1782,4 +1784,3 @@ def load_javascript(raw_response):
 
 
 reload_javascript = partial(load_javascript, gradio.routes.templates.TemplateResponse)
-reload_javascript()
--
cgit v1.2.1

From 95c6308ccd2e075d1fb804f5b98a4f0b07b87b7d Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 2 Nov 2022 09:47:53 +0300
Subject: switch to gradio 3.8

---
 modules/ui.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'modules')

diff --git a/modules/ui.py b/modules/ui.py
index a94f46ea..45cd8c3f 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1572,8 +1572,7 @@ def create_ui(wrap_gradio_gpu_call):
         reload_script_bodies.click(
             fn=reload_scripts,
             inputs=[],
-            outputs=[],
-            _js='function(){}'
+            outputs=[]
         )
 
         def request_restart():
@@ -1585,7 +1584,7 @@ def create_ui(wrap_gradio_gpu_call):
             fn=request_restart,
             inputs=[],
             outputs=[],
-            _js='function(){restart_reload()}'
+            _js='restart_reload'
         )
 
         if column is not None:
--
cgit v1.2.1
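Note: taken together, the callback commits above (the param object, the on_cfg_denoiser registration, and the values being assigned back inside CFGDenoiser) let an extension observe, and now also adjust, the denoiser inputs. A sketch of a minimal consumer, assuming it lives in an extension's scripts/ directory so the modules package is importable:

    from modules import script_callbacks
    from modules.script_callbacks import CFGDenoiserParams

    def log_denoiser_step(params: CFGDenoiserParams):
        # Called once per inner-model evaluation; params.x, params.image_cond and
        # params.sigma are the tensors built in CFGDenoiser, and changes made to
        # them here are picked up by the sampler after the "assign back" commit.
        print(f"step {params.sampling_step + 1}/{params.total_sampling_steps}, "
              f"sigma={params.sigma[-1].item():.4f}")

    script_callbacks.on_cfg_denoiser(log_denoiser_step)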