From d8b90ac121cbf0c18b1dc9d56a5e1d14ca51e74e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 15 Jan 2023 18:50:56 +0300
Subject: big rework of progressbar/preview system to allow multiple users to
 prompt at the same time and not get previews of each other

---
 modules/shared.py | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

(limited to 'modules/shared.py')

diff --git a/modules/shared.py b/modules/shared.py
index 51df056c..de99aca9 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -152,6 +152,7 @@ def reload_hypernetworks():
 
     hypernetwork.load_hypernetwork(opts.sd_hypernetwork)
 
+
 class State:
     skipped = False
     interrupted = False
@@ -165,6 +166,7 @@ class State:
     current_latent = None
     current_image = None
     current_image_sampling_step = 0
+    id_live_preview = 0
     textinfo = None
     time_start = None
     need_restart = False
@@ -207,6 +209,7 @@ class State:
         self.current_latent = None
         self.current_image = None
         self.current_image_sampling_step = 0
+        self.id_live_preview = 0
         self.skipped = False
         self.interrupted = False
         self.textinfo = None
@@ -220,8 +223,8 @@ class State:
 
         devices.torch_gc()
 
-    """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
     def set_current_image(self):
+        """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
         if not parallel_processing_allowed:
             return
 
@@ -234,12 +237,16 @@ class State:
 
         import modules.sd_samplers
         if opts.show_progress_grid:
-            self.current_image = modules.sd_samplers.samples_to_image_grid(self.current_latent)
+            self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent))
         else:
-            self.current_image = modules.sd_samplers.sample_to_image(self.current_latent)
+            self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent))
 
         self.current_image_sampling_step = self.sampling_step
 
+    def assign_current_image(self, image):
+        self.current_image = image
+        self.id_live_preview += 1
+
 
 state = State()
 state.server_start = time.time()
@@ -424,8 +431,6 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"),
 }))
 
 options_templates.update(options_section(('ui', "User interface"), {
-    "show_progressbar": OptionInfo(True, "Show progressbar"),
-    "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
     "return_grid": OptionInfo(True, "Show grid in results for web"),
     "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
     "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
@@ -446,6 +451,7 @@ options_templates.update(options_section(('ui', "User interface"), {
 
 options_templates.update(options_section(('ui', "Live previews"), {
     "live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
+    "show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
     "show_progress_every_n_steps": OptionInfo(10, "Show new live preview image every N sampling steps. Set to -1 to show after completion of batch.", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}),
    "show_progress_type": OptionInfo("Approx NN", "Image creation progress preview mode", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap"]}),
     "live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
--
cgit v1.2.1
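
The core of the change is the new State.assign_current_image() helper: every time a preview is produced it replaces current_image and increments id_live_preview, so each user session can remember the last id it was served and only fetch a preview when the counter has moved. Below is a minimal, self-contained sketch of that polling pattern; FakeState and poll_live_preview are hypothetical names for illustration, since the real consumer of id_live_preview lives outside modules/shared.py and is not shown in this diff.

# Illustrative sketch only -- not part of this patch. It shows how a polling
# consumer could use id_live_preview to skip previews it has already seen.
# FakeState and poll_live_preview are hypothetical names for this example.

class FakeState:
    """Stand-in for modules.shared.State, reduced to the live preview fields."""
    def __init__(self):
        self.current_image = None
        self.id_live_preview = 0

    def assign_current_image(self, image):
        # Mirrors the new State.assign_current_image: every freshly decoded
        # preview bumps the id so pollers can detect changes cheaply.
        self.current_image = image
        self.id_live_preview += 1


def poll_live_preview(state, id_seen_by_client):
    """Return (id, image); image is None unless the counter has advanced."""
    if state.current_image is not None and state.id_live_preview != id_seen_by_client:
        return state.id_live_preview, state.current_image
    return id_seen_by_client, None


if __name__ == "__main__":
    state = FakeState()
    seen = 0

    state.assign_current_image("preview-after-step-10")  # a PIL.Image in the real code
    seen, image = poll_live_preview(state, seen)
    print(seen, image)   # 1 preview-after-step-10

    # A second poll without a new preview returns None, so the client does
    # not re-encode or re-download the same image.
    seen, image = poll_live_preview(state, seen)
    print(seen, image)   # 1 None

In the same spirit, the diff drops the old show_progressbar toggle from the "User interface" options and moves show_progress_grid into the "Live previews" section, grouping preview behaviour with the rest of the live preview settings.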