Diffstat (limited to 'modules')
28 files changed, 691 insertions, 179 deletions
diff --git a/modules/api/api.py b/modules/api/api.py
index 11045292..2a4cd8a2 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -1,5 +1,6 @@
import base64
import io
+import os
import time
import datetime
import uvicorn
@@ -98,14 +99,16 @@ def encode_pil_to_base64(image):
def api_middleware(app: FastAPI):
- rich_available = True
+ rich_available = False
try:
- import anyio # importing just so it can be placed on silent list
- import starlette # importing just so it can be placed on silent list
- from rich.console import Console
- console = Console()
+ if os.environ.get('WEBUI_RICH_EXCEPTIONS', None) is not None:
+ import anyio # importing just so it can be placed on silent list
+ import starlette # importing just so it can be placed on silent list
+ from rich.console import Console
+ console = Console()
+ rich_available = True
except Exception:
- rich_available = False
+ pass
@app.middleware("http")
async def log_and_time(req: Request, call_next):
@@ -116,14 +119,14 @@ def api_middleware(app: FastAPI):
endpoint = req.scope.get('path', 'err')
if shared.cmd_opts.api_log and endpoint.startswith('/sdapi'):
print('API {t} {code} {prot}/{ver} {method} {endpoint} {cli} {duration}'.format(
- t = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
- code = res.status_code,
- ver = req.scope.get('http_version', '0.0'),
- cli = req.scope.get('client', ('0:0.0.0', 0))[0],
- prot = req.scope.get('scheme', 'err'),
- method = req.scope.get('method', 'err'),
- endpoint = endpoint,
- duration = duration,
+ t=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
+ code=res.status_code,
+ ver=req.scope.get('http_version', '0.0'),
+ cli=req.scope.get('client', ('0:0.0.0', 0))[0],
+ prot=req.scope.get('scheme', 'err'),
+ method=req.scope.get('method', 'err'),
+ endpoint=endpoint,
+ duration=duration,
))
return res
@@ -134,7 +137,7 @@ def api_middleware(app: FastAPI):
"body": vars(e).get('body', ''),
"errors": str(e),
}
- if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions
+ if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions
message = f"API error: {request.method}: {request.url} {err}"
if rich_available:
print(message)
diff --git a/modules/api/models.py b/modules/api/models.py
index b5683071..bf97b1a3 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -1,4 +1,5 @@
import inspect
+
from pydantic import BaseModel, Field, create_model
from typing import Any, Optional
from typing_extensions import Literal
@@ -207,11 +208,12 @@ class PreprocessResponse(BaseModel):
fields = {}
for key, metadata in opts.data_labels.items():
value = opts.data.get(key)
- optType = opts.typemap.get(type(metadata.default), type(value))
+ optType = opts.typemap.get(type(metadata.default), type(metadata.default))
- if (metadata is not None):
- fields.update({key: (Optional[optType], Field(
- default=metadata.default ,description=metadata.label))})
+ if metadata.default is None:
+ pass
+ elif metadata is not None:
+ fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
else:
fields.update({key: (Optional[optType], Field())})
diff --git a/modules/cache.py b/modules/cache.py
new file mode 100644
index 00000000..71fe6302
--- /dev/null
+++ b/modules/cache.py
@@ -0,0 +1,120 @@
+import json
+import os.path
+import threading
+import time
+
+from modules.paths import data_path, script_path
+
+cache_filename = os.path.join(data_path, "cache.json")
+cache_data = None
+cache_lock = threading.Lock()
+
+dump_cache_after = None
+dump_cache_thread = None
+
+
+def dump_cache():
+ """
+ Marks the cache for writing to disk. The cache is written to disk 5 seconds after the last time it was marked for writing.
+ """
+
+ global dump_cache_after
+ global dump_cache_thread
+
+ def thread_func():
+ global dump_cache_after
+ global dump_cache_thread
+
+ while dump_cache_after is not None and time.time() < dump_cache_after:
+ time.sleep(1)
+
+ with cache_lock:
+ with open(cache_filename, "w", encoding="utf8") as file:
+ json.dump(cache_data, file, indent=4)
+
+ dump_cache_after = None
+ dump_cache_thread = None
+
+ with cache_lock:
+ dump_cache_after = time.time() + 5
+ if dump_cache_thread is None:
+ dump_cache_thread = threading.Thread(name='cache-writer', target=thread_func)
+ dump_cache_thread.start()
+
+
+def cache(subsection):
+ """
+ Retrieves or initializes a cache for a specific subsection.
+
+ Parameters:
+ subsection (str): The subsection identifier for the cache.
+
+ Returns:
+ dict: The cache data for the specified subsection.
+ """
+
+ global cache_data
+
+ if cache_data is None:
+ with cache_lock:
+ if cache_data is None:
+ if not os.path.isfile(cache_filename):
+ cache_data = {}
+ else:
+ try:
+ with open(cache_filename, "r", encoding="utf8") as file:
+ cache_data = json.load(file)
+ except Exception:
+ os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json"))
+ print('[ERROR] issue occurred while trying to read cache.json; the old cache was moved to tmp/cache.json and a new, empty cache was created')
+ cache_data = {}
+
+ s = cache_data.get(subsection, {})
+ cache_data[subsection] = s
+
+ return s
+
+
+def cached_data_for_file(subsection, title, filename, func):
+ """
+ Retrieves or generates data for a specific file, using a caching mechanism.
+
+ Parameters:
+ subsection (str): The subsection of the cache to use.
+ title (str): The title of the data entry in the subsection of the cache.
+ filename (str): The path to the file to be checked for modifications.
+ func (callable): A function that generates the data if it is not available in the cache.
+
+ Returns:
+ dict or None: The cached or generated data, or None if data generation fails.
+
+ The `cached_data_for_file` function implements a caching mechanism for data stored in files.
+ It checks if the data associated with the given `title` is present in the cache and compares the
+ modification time of the file with the cached modification time. If the file has been modified,
+ the cache is considered invalid and the data is regenerated using the provided `func`.
+ Otherwise, the cached data is returned.
+
+ If the data generation fails, None is returned to indicate the failure. Otherwise, the generated
+ or cached data is returned as a dictionary.
+ """
+
+ existing_cache = cache(subsection)
+ ondisk_mtime = os.path.getmtime(filename)
+
+ entry = existing_cache.get(title)
+ if entry:
+ cached_mtime = entry.get("mtime", 0)
+ if ondisk_mtime > cached_mtime:
+ entry = None
+
+ if not entry or 'value' not in entry:
+ value = func()
+ if value is None:
+ return None
+
+ entry = {'mtime': ondisk_mtime, 'value': value}
+ existing_cache[title] = entry
+
+ dump_cache()
+
+ return entry['value']
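The new modules/cache.py replaces the filelock-based cache helpers that used to live in modules/hashes.py. A minimal usage sketch, assuming the webui modules package is importable; the subsection name, file path and compute function below are hypothetical and not part of the diff:

from modules import cache

def compute_metadata():
    # hypothetical expensive work that only runs on a cache miss
    return {"vectors": 16, "shape": 768}

# returns the cached value as long as models/my-model.pt has not been modified
# since the entry was written; otherwise calls compute_metadata() and stores the result
value = cache.cached_data_for_file("example-subsection", "my-model.pt", "models/my-model.pt", compute_metadata)

# direct access to a subsection dict; manual changes should be followed by dump_cache()
subsection = cache.cache("example-subsection")
subsection["some-key"] = {"mtime": 0, "value": 123}
cache.dump_cache()

Note that dump_cache() only flags the cache for writing; a background thread writes cache.json roughly 5 seconds after the last flag.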
diff --git a/modules/call_queue.py b/modules/call_queue.py
index 3b94f8a4..61aa240f 100644
--- a/modules/call_queue.py
+++ b/modules/call_queue.py
@@ -85,9 +85,9 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
elapsed = time.perf_counter() - t
elapsed_m = int(elapsed // 60)
elapsed_s = elapsed % 60
- elapsed_text = f"{elapsed_s:.2f}s"
+ elapsed_text = f"{elapsed_s:.1f} sec."
if elapsed_m > 0:
- elapsed_text = f"{elapsed_m}m "+elapsed_text
+ elapsed_text = f"{elapsed_m} min. "+elapsed_text
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
@@ -95,14 +95,22 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
reserved_peak = mem_stats['reserved_peak']
sys_peak = mem_stats['system_peak']
sys_total = mem_stats['total']
- sys_pct = round(sys_peak/max(sys_total, 1) * 100, 2)
+ sys_pct = sys_peak/max(sys_total, 1) * 100
- vram_html = f"<p class='vram'>Torch active/reserved: {active_peak}/{reserved_peak} MiB, <wbr>Sys VRAM: {sys_peak}/{sys_total} MiB ({sys_pct}%)</p>"
+ tooltip_a = "Active: peak amount of video memory used during generation (excluding cached data)"
+ tooltip_r = "Reserved: total amount of video memory allocated by the Torch library"
+ tooltip_sys = "System: peak amount of video memory allocated by all running programs, out of total capacity"
+
+ text_a = f"<abbr title='{tooltip_a}'>A</abbr>: <span class='measurement'>{active_peak/1024:.2f} GB</span>"
+ text_r = f"<abbr title='{tooltip_r}'>R</abbr>: <span class='measurement'>{reserved_peak/1024:.2f} GB</span>"
+ text_sys = f"<abbr title='{tooltip_sys}'>Sys</abbr>: <span class='measurement'>{sys_peak/1024:.1f}/{sys_total/1024:g} GB</span> ({sys_pct:.1f}%)"
+
+ vram_html = f"<p class='vram'>{text_a}, <wbr>{text_r}, <wbr>{text_sys}</p>"
else:
vram_html = ''
# last item is always HTML
- res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed_text}</p>{vram_html}</div>"
+ res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr><span class='measurement'>{elapsed_text}</span></p>{vram_html}</div>"
return tuple(res)
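For reference, a small sketch of how the new formatting renders the memory statistics (inputs are MiB values like those produced by mem_mon above; the numbers are made up):

active_peak, reserved_peak, sys_peak, sys_total = 3481, 4096, 5120, 8192  # MiB
sys_pct = sys_peak / max(sys_total, 1) * 100
print(f"A: {active_peak/1024:.2f} GB")    # A: 3.40 GB
print(f"R: {reserved_peak/1024:.2f} GB")  # R: 4.00 GB
print(f"Sys: {sys_peak/1024:.1f}/{sys_total/1024:g} GB ({sys_pct:.1f}%)")  # Sys: 5.0/8 GB (62.5%)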
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index ae78f469..e401f641 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -15,6 +15,7 @@ parser.add_argument("--update-check", action='store_true', help="launch.py argum
parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
+parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
diff --git a/modules/extensions.py b/modules/extensions.py
index abc6e2b1..c561159a 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -1,7 +1,7 @@
import os
import threading
-from modules import shared, errors
+from modules import shared, errors, cache
from modules.gitpython_hack import Repo
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401
@@ -21,6 +21,7 @@ def active():
class Extension:
lock = threading.Lock()
+ cached_fields = ['remote', 'commit_date', 'branch', 'commit_hash', 'version']
def __init__(self, name, path, enabled=True, is_builtin=False):
self.name = name
@@ -36,15 +37,29 @@ class Extension:
self.remote = None
self.have_info_from_repo = False
+ def to_dict(self):
+ return {x: getattr(self, x) for x in self.cached_fields}
+
+ def from_dict(self, d):
+ for field in self.cached_fields:
+ setattr(self, field, d[field])
+
def read_info_from_repo(self):
if self.is_builtin or self.have_info_from_repo:
return
- with self.lock:
- if self.have_info_from_repo:
- return
+ def read_from_repo():
+ with self.lock:
+ if self.have_info_from_repo:
+ return
+
+ self.do_read_info_from_repo()
+
+ return self.to_dict()
- self.do_read_info_from_repo()
+ d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
+ self.from_dict(d)
+ self.status = 'unknown'
def do_read_info_from_repo(self):
repo = None
@@ -58,7 +73,6 @@ class Extension:
self.remote = None
else:
try:
- self.status = 'unknown'
self.remote = next(repo.remote().urls, None)
commit = repo.head.commit
self.commit_date = commit.committed_date
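The caching pattern used by Extension above (a fixed cached_fields list, to_dict/from_dict, and cache.cached_data_for_file keyed on the .git directory's mtime) can be sketched in isolation roughly as follows; CachedRepoInfo and its fields are illustrative stand-ins, not part of the diff:

import os
from modules import cache

class CachedRepoInfo:
    cached_fields = ['remote', 'branch']  # fields worth persisting between runs

    def __init__(self, name, path):
        self.name, self.path, self.remote, self.branch = name, path, None, None

    def to_dict(self):
        return {field: getattr(self, field) for field in self.cached_fields}

    def from_dict(self, d):
        for field in self.cached_fields:
            setattr(self, field, d[field])

    def read_info(self):
        def compute():
            # expensive git queries would go here
            self.remote, self.branch = "https://example.com/repo.git", "main"
            return self.to_dict()

        # recomputed only when the .git directory's mtime changes
        self.from_dict(cache.cached_data_for_file('repos-git', self.name, os.path.join(self.path, ".git"), compute))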
diff --git a/modules/extra_networks.py b/modules/extra_networks.py
index 41799b0a..6ae07e91 100644
--- a/modules/extra_networks.py
+++ b/modules/extra_networks.py
@@ -4,16 +4,22 @@ from collections import defaultdict
from modules import errors
extra_network_registry = {}
+extra_network_aliases = {}
def initialize():
extra_network_registry.clear()
+ extra_network_aliases.clear()
def register_extra_network(extra_network):
extra_network_registry[extra_network.name] = extra_network
+def register_extra_network_alias(extra_network, alias):
+ extra_network_aliases[alias] = extra_network
+
+
def register_default_extra_networks():
from modules.extra_networks_hypernet import ExtraNetworkHypernet
register_extra_network(ExtraNetworkHypernet())
@@ -82,20 +88,26 @@ def activate(p, extra_network_data):
"""call activate for extra networks in extra_network_data in specified order, then call
activate for all remaining registered networks with an empty argument list"""
+ activated = []
+
for extra_network_name, extra_network_args in extra_network_data.items():
extra_network = extra_network_registry.get(extra_network_name, None)
+
+ if extra_network is None:
+ extra_network = extra_network_aliases.get(extra_network_name, None)
+
if extra_network is None:
print(f"Skipping unknown extra network: {extra_network_name}")
continue
try:
extra_network.activate(p, extra_network_args)
+ activated.append(extra_network)
except Exception as e:
errors.display(e, f"activating extra network {extra_network_name} with arguments {extra_network_args}")
for extra_network_name, extra_network in extra_network_registry.items():
- args = extra_network_data.get(extra_network_name, None)
- if args is not None:
+ if extra_network in activated:
continue
try:
diff --git a/modules/hashes.py b/modules/hashes.py
index ec1187fe..b7a33b42 100644
--- a/modules/hashes.py
+++ b/modules/hashes.py
@@ -1,43 +1,11 @@
import hashlib
-import json
import os.path
-import filelock
-
from modules import shared
-from modules.paths import data_path, script_path
-
-
-cache_filename = os.path.join(data_path, "cache.json")
-cache_data = None
-
-
-def dump_cache():
- with filelock.FileLock(f"{cache_filename}.lock"):
- with open(cache_filename, "w", encoding="utf8") as file:
- json.dump(cache_data, file, indent=4)
-
-
-def cache(subsection):
- global cache_data
-
- if cache_data is None:
- with filelock.FileLock(f"{cache_filename}.lock"):
- if not os.path.isfile(cache_filename):
- cache_data = {}
- else:
- try:
- with open(cache_filename, "r", encoding="utf8") as file:
- cache_data = json.load(file)
- except Exception:
- os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json"))
- print('[ERROR] issue occurred while trying to read cache.json, move current cache to tmp/cache.json and create new cache')
- cache_data = {}
-
- s = cache_data.get(subsection, {})
- cache_data[subsection] = s
+import modules.cache
- return s
+dump_cache = modules.cache.dump_cache
+cache = modules.cache.cache
def calculate_sha256(filename):
diff --git a/modules/images.py b/modules/images.py
index 4bdedb7f..38aa933d 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -363,7 +363,7 @@ class FilenameGenerator:
'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False),
'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
- 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.model_name, replace_spaces=False),
+ 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.name_for_extra, replace_spaces=False),
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
@@ -380,6 +380,7 @@ class FilenameGenerator:
'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
'user': lambda self: self.p.user,
'vae_filename': lambda self: self.get_vae_filename(),
+ 'none': lambda self: '', # Overrides the default so you can get just the sequence number
}
default_time_format = '%Y%m%d%H%M%S'
@@ -601,13 +602,13 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
else:
file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
+ file_decoration = namegen.apply(file_decoration) + suffix
+
add_number = opts.save_images_add_number or file_decoration == ''
if file_decoration != "" and add_number:
file_decoration = f"-{file_decoration}"
- file_decoration = namegen.apply(file_decoration) + suffix
-
if add_number:
basecount = get_next_sequence_number(path, basename)
fullfn = None
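With the reordering above, the filename pattern is expanded before the emptiness check, so the new "[none]" pattern expands to an empty string and the saved file gets only its sequence number. A rough, self-contained sketch of that decision; decorated_name is a hypothetical stand-in for the relevant part of save_image():

def decorated_name(pattern_expansion, save_images_add_number=True, basecount=7):
    # mirrors the reordered logic: expand the pattern first, then decide on numbering
    file_decoration = pattern_expansion  # stands in for namegen.apply(...) + suffix
    add_number = save_images_add_number or file_decoration == ""
    if file_decoration != "" and add_number:
        file_decoration = f"-{file_decoration}"
    return f"{basecount:05}{file_decoration}" if add_number else file_decoration

print(decorated_name("my prompt"))  # 00007-my prompt
print(decorated_name(""))           # 00007  (a "[none]" pattern now yields just the number)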
diff --git a/modules/img2img.py b/modules/img2img.py
index 664e2688..a811e7a4 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -240,4 +240,4 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
if opts.do_not_show_images:
processed.images = []
- return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)
+ return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 01ea7c91..03552bc2 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -1,4 +1,5 @@
# this scripts installs necessary requirements and launches main program in webui.py
+import re
import subprocess
import os
import sys
@@ -9,6 +10,9 @@ from functools import lru_cache
from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
+from modules import timer
+
+timer.startup_timer.record("start")
args, _ = cmd_args.parser.parse_known_args()
@@ -69,10 +73,12 @@ def git_tag():
return subprocess.check_output([git, "describe", "--tags"], shell=False, encoding='utf8').strip()
except Exception:
try:
- from pathlib import Path
- changelog_md = Path(__file__).parent.parent / "CHANGELOG.md"
- with changelog_md.open(encoding="utf-8") as file:
- return next((line.strip() for line in file if line.strip()), "<none>")
+
+ changelog_md = os.path.join(os.path.dirname(os.path.dirname(__file__)), "CHANGELOG.md")
+ with open(changelog_md, "r", encoding="utf-8") as file:
+ line = next((line.strip() for line in file if line.strip()), "<none>")
+ line = line.replace("## ", "")
+ return line
except Exception:
return "<none>"
@@ -224,6 +230,44 @@ def run_extensions_installers(settings_file):
run_extension_installer(os.path.join(extensions_dir, dirname_extension))
+re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
+
+
+def requirements_met(requirements_file):
+ """
+ Does a simple parse of a requirements.txt file to determine if all requirements in it
+ are already installed. Returns True if so, False if not installed or parsing fails.
+ """
+
+ import importlib.metadata
+ import packaging.version
+
+ with open(requirements_file, "r", encoding="utf8") as file:
+ for line in file:
+ if line.strip() == "":
+ continue
+
+ m = re.match(re_requirement, line)
+ if m is None:
+ return False
+
+ package = m.group(1).strip()
+ version_required = (m.group(2) or "").strip()
+
+ if version_required == "":
+ continue
+
+ try:
+ version_installed = importlib.metadata.version(package)
+ except Exception:
+ return False
+
+ if packaging.version.parse(version_required) != packaging.version.parse(version_installed):
+ return False
+
+ return True
+
+
def prepare_environment():
torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118")
torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
@@ -309,7 +353,9 @@ def prepare_environment():
if not os.path.isfile(requirements_file):
requirements_file = os.path.join(script_path, requirements_file)
- run_pip(f"install -r \"{requirements_file}\"", "requirements")
+
+ if not requirements_met(requirements_file):
+ run_pip(f"install -r \"{requirements_file}\"", "requirements")
run_extensions_installers(settings_file=args.ui_settings_file)
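A quick standalone illustration of what the requirement regex above accepts; the sample lines are arbitrary:

import re

re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")

for line in ["gradio==3.41.2", "piexif", "accelerate == 0.21.0"]:
    m = re.match(re_requirement, line)
    print(m.group(1), m.group(2))
# gradio 3.41.2
# piexif None
# accelerate 0.21.0

Only pinned "package==version" entries are compared; unpinned entries are skipped, and any uninstalled or unparsable entry makes requirements_met() return False so that the full pip install -r still runs.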
diff --git a/modules/processing.py b/modules/processing.py
index eb4a60eb..a74a5302 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -14,7 +14,7 @@ from skimage import exposure
from typing import Any, Dict, List
import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
@@ -538,6 +538,40 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
return x
+def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
+ samples = []
+
+ for i in range(batch.shape[0]):
+ sample = decode_first_stage(model, batch[i:i + 1])[0]
+
+ if check_for_nans:
+ try:
+ devices.test_for_nans(sample, "vae")
+ except devices.NansException as e:
+ if devices.dtype_vae == torch.float32 or not shared.opts.auto_vae_precision:
+ raise e
+
+ errors.print_error_explanation(
+ "A tensor with all NaNs was produced in VAE.\n"
+ "Web UI will now convert VAE into 32-bit float and retry.\n"
+ "To disable this behavior, disable the 'Automaticlly revert VAE to 32-bit floats' setting.\n"
+ "To always start with 32-bit VAE, use --no-half-vae commandline flag."
+ )
+
+ devices.dtype_vae = torch.float32
+ model.first_stage_model.to(devices.dtype_vae)
+ batch = batch.to(devices.dtype_vae)
+
+ sample = decode_first_stage(model, batch[i:i + 1])[0]
+
+ if target_device is not None:
+ sample = sample.to(target_device)
+
+ samples.append(sample)
+
+ return samples
+
+
def decode_first_stage(model, x):
x = model.decode_first_stage(x.to(devices.dtype_vae))
@@ -587,7 +621,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
- "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
+ "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
@@ -747,9 +781,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
p.setup_conds()
- if len(model_hijack.comments) > 0:
- for comment in model_hijack.comments:
- comments[comment] = 1
+ for comment in model_hijack.comments:
+ comments[comment] = 1
+
+ p.extra_generation_params.update(model_hijack.extra_generation_params)
if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
@@ -757,10 +792,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
- x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))]
- for x in x_samples_ddim:
- devices.test_for_nans(x, "vae")
-
+ x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
x_samples_ddim = torch.stack(x_samples_ddim).float()
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
@@ -1028,7 +1060,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
image = sd_samplers.sample_to_image(image, index, approximation=0)
info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
- images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, suffix="-before-highres-fix")
+ images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, p=self, suffix="-before-highres-fix")
if latent_scale_mode is not None:
for i in range(samples.shape[0]):
@@ -1302,7 +1334,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
image = torch.from_numpy(batch_images)
image = 2. * image - 1.
- image = image.to(shared.device)
+ image = image.to(shared.device, dtype=devices.dtype_vae)
self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
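decode_latent_batch above follows a detect-NaNs, upcast, retry approach. A simplified, self-contained sketch of that pattern (it omits the auto_vae_precision option check and the global dtype switch; decode stands in for the VAE decode call):

import torch

def decode_with_fallback(decode, latent, dtype=torch.float16):
    sample = decode(latent.to(dtype))
    if torch.isnan(sample).all():
        # mirrors the webui behaviour: redo the decode in full precision
        sample = decode(latent.to(torch.float32))
    return sample

# toy decode that "fails" (produces all NaNs) in half precision
decode = lambda x: torch.full_like(x, float('nan')) if x.dtype == torch.float16 else x
print(decode_with_fallback(decode, torch.zeros(4)).dtype)  # torch.float32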
diff --git a/modules/script_loading.py b/modules/script_loading.py
index 306a1f35..0d55f193 100644
--- a/modules/script_loading.py
+++ b/modules/script_loading.py
@@ -12,11 +12,12 @@ def load_module(path):
return module
-def preload_extensions(extensions_dir, parser):
+def preload_extensions(extensions_dir, parser, extension_list=None):
if not os.path.isdir(extensions_dir):
return
- for dirname in sorted(os.listdir(extensions_dir)):
+ extensions = extension_list if extension_list is not None else os.listdir(extensions_dir)
+ for dirname in sorted(extensions):
preload_script = os.path.join(extensions_dir, dirname, "preload.py")
if not os.path.isfile(preload_script):
continue
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 2b274c18..f5615967 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -159,7 +159,6 @@ def undo_weighted_forward(sd_model):
class StableDiffusionModelHijack:
fixes = None
- comments = []
layers = None
circular_enabled = False
clip = None
@@ -168,6 +167,9 @@ class StableDiffusionModelHijack:
embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase()
def __init__(self):
+ self.extra_generation_params = {}
+ self.comments = []
+
self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
def apply_optimizations(self, option=None):
@@ -274,6 +276,7 @@ class StableDiffusionModelHijack:
def clear_comments(self):
self.comments = []
+ self.extra_generation_params = {}
def get_prompt_lengths(self, text):
if self.clip is None:
diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py
index b3771909..5443e609 100644
--- a/modules/sd_hijack_clip.py
+++ b/modules/sd_hijack_clip.py
@@ -234,9 +234,18 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
z = self.process_tokens(tokens, multipliers)
zs.append(z)
- if len(used_embeddings) > 0:
- embeddings_list = ", ".join([f'{name} [{embedding.checksum()}]' for name, embedding in used_embeddings.items()])
- self.hijack.comments.append(f"Used embeddings: {embeddings_list}")
+ if opts.textual_inversion_add_hashes_to_infotext and used_embeddings:
+ hashes = []
+ for name, embedding in used_embeddings.items():
+ shorthash = embedding.shorthash
+ if not shorthash:
+ continue
+
+ name = name.replace(":", "").replace(",", "")
+ hashes.append(f"{name}: {shorthash}")
+
+ if hashes:
+ self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes)
if getattr(self.wrapped, 'return_pooled', False):
return torch.hstack(zs), zs[0].pooled
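The resulting infotext entry looks like "TI hashes: embA: 1234567890ab, embB: abcdef123456". A small sketch of how it is assembled; the embedding names and shorthashes are made up:

used = {"embA": "1234567890ab", "embB": "abcdef123456"}  # name -> shorthash
hashes = [f"{name.replace(':', '').replace(',', '')}: {shorthash}" for name, shorthash in used.items() if shorthash]
print("TI hashes: " + ", ".join(hashes))
# TI hashes: embA: 1234567890ab, embB: abcdef123456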
diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py
index ca1daf45..2101f1a0 100644
--- a/modules/sd_hijack_unet.py
+++ b/modules/sd_hijack_unet.py
@@ -39,7 +39,10 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
if isinstance(cond, dict):
for y in cond.keys():
- cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
+ if isinstance(cond[y], list):
+ cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
+ else:
+ cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y]
with devices.autocast():
return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float()
@@ -77,3 +80,6 @@ first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devi
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)
+
+CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model, unet_needs_upcast)
+CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
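The list-or-tensor handling above is presumably needed because the sgm (SDXL) code path passes conditioning entries as bare tensors, while ldm passes lists of tensors. A standalone sketch of the same normalisation (torch assumed available):

import torch

def cond_to_dtype(cond, dtype=torch.float16):
    # mirrors apply_model(): lists of tensors and bare tensors are both handled
    for key, value in cond.items():
        if isinstance(value, list):
            cond[key] = [x.to(dtype) if isinstance(x, torch.Tensor) else x for x in value]
        else:
            cond[key] = value.to(dtype) if isinstance(value, torch.Tensor) else value
    return cond

cond = {"c_crossattn": [torch.zeros(1, 77, 768)], "vector": torch.zeros(1, 1280)}
cond = cond_to_dtype(cond)
print(cond["c_crossattn"][0].dtype, cond["vector"].dtype)  # torch.float16 torch.float16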
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 729f03d7..fb31a793 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -290,6 +290,9 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
model.is_sdxl = hasattr(model, 'conditioner')
+ model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
+ model.is_sd1 = not model.is_sdxl and not model.is_sd2
+
if model.is_sdxl:
sd_models_xl.extend_sdxl(model)
@@ -323,7 +326,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
timer.record("apply half()")
- devices.dtype_unet = model.model.diffusion_model.dtype
+ devices.dtype_unet = torch.float16 if model.is_sdxl and not shared.cmd_opts.no_half else model.model.diffusion_model.dtype
devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
model.first_stage_model.to(devices.dtype_vae)
@@ -491,7 +494,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
sd_model = None
try:
- with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
+ with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd or shared.cmd_opts.do_not_download_clip):
sd_model = instantiate_from_config(sd_config.model)
except Exception:
pass
diff --git a/modules/shared.py b/modules/shared.py
index b28933a0..aa72c9c8 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -11,6 +11,7 @@ import gradio as gr
import torch
import tqdm
+import launch
import modules.interrogate
import modules.memmon
import modules.styles
@@ -26,7 +27,7 @@ demo = None
parser = cmd_args.parser
-script_loading.preload_extensions(extensions_dir, parser)
+script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file))
script_loading.preload_extensions(extensions_builtin_dir, parser)
if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
@@ -426,6 +427,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
"CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
"upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
+ "auto_vae_precision": OptionInfo(True, "Automaticlly revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"),
"randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors"),
}))
@@ -473,12 +475,15 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"),
options_templates.update(options_section(('extra_networks', "Extra Networks"), {
"extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."),
"extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'),
- "extra_networks_default_view": OptionInfo("cards", "Default view for Extra Networks", gr.Dropdown, {"choices": ["cards", "thumbs"]}),
- "extra_networks_default_multiplier": OptionInfo(1.0, "Multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+ "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}),
"extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"),
"extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"),
+ "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"),
+ "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"),
"extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
"ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_restart(),
+ "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"),
+ "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"),
"sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks),
}))
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index cbe975b7..6166c76f 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -13,7 +13,7 @@ import numpy as np
from PIL import Image, PngImagePlugin
from torch.utils.tensorboard import SummaryWriter
-from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors
+from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes
import modules.textual_inversion.dataset
from modules.textual_inversion.learn_schedule import LearnRateScheduler
@@ -49,6 +49,8 @@ class Embedding:
self.sd_checkpoint_name = None
self.optimizer_state_dict = None
self.filename = None
+ self.hash = None
+ self.shorthash = None
def save(self, filename):
embedding_data = {
@@ -82,6 +84,10 @@ class Embedding:
self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
return self.cached_checksum
+ def set_hash(self, v):
+ self.hash = v
+ self.shorthash = self.hash[0:12]
+
class DirWithTextualInversionEmbeddings:
def __init__(self, path):
@@ -199,6 +205,7 @@ class EmbeddingDatabase:
embedding.vectors = vec.shape[0]
embedding.shape = vec.shape[-1]
embedding.filename = path
+ embedding.set_hash(hashes.sha256(embedding.filename, "textual_inversion/" + name) or '')
if self.expected_shape == -1 or self.expected_shape == embedding.shape:
self.register_embedding(embedding, shared.sd_model)
@@ -249,7 +256,7 @@ class EmbeddingDatabase:
self.word_embeddings.update(sorted_word_embeddings)
displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys()))
- if self.previously_displayed_embeddings != displayed_embeddings:
+ if shared.opts.textual_inversion_print_at_load and self.previously_displayed_embeddings != displayed_embeddings:
self.previously_displayed_embeddings = displayed_embeddings
print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}")
if self.skipped_embeddings:
diff --git a/modules/txt2img.py b/modules/txt2img.py
index d0be2e73..29d94e8c 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -70,4 +70,4 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
if opts.do_not_show_images:
processed.images = []
- return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)
+ return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")
diff --git a/modules/ui.py b/modules/ui.py
index 39d226ad..07ecee7b 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -83,8 +83,7 @@ detect_image_size_symbol = '\U0001F4D0' # 📐
up_down_symbol = '\u2195\ufe0f' # ↕️
-def plaintext_to_html(text):
- return ui_common.plaintext_to_html(text)
+plaintext_to_html = ui_common.plaintext_to_html
def send_gradio_gallery_to_image(x):
diff --git a/modules/ui_common.py b/modules/ui_common.py
index 57c2d0ad..11eb2a4b 100644
--- a/modules/ui_common.py
+++ b/modules/ui_common.py
@@ -29,9 +29,10 @@ def update_generation_info(generation_info, html_info, img_index):
return html_info, gr.update()
-def plaintext_to_html(text):
- text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
- return text
+def plaintext_to_html(text, classname=None):
+ content = "<br>\n".join(html.escape(x) for x in text.split('\n'))
+
+ return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"
def save_files(js_data, images, do_make_zip, index):
@@ -157,7 +158,7 @@ Requested path was: {f}
with gr.Group():
html_info = gr.HTML(elem_id=f'html_info_{tabname}', elem_classes="infotext")
- html_log = gr.HTML(elem_id=f'html_log_{tabname}')
+ html_log = gr.HTML(elem_id=f'html_log_{tabname}', elem_classes="html-log")
generation_info = gr.Textbox(visible=False, elem_id=f'generation_info_{tabname}')
if tabname == 'txt2img' or tabname == 'img2img':
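For reference, the reworked helper produces output like this (the function body is copied from the diff and exercised standalone):

import html

def plaintext_to_html(text, classname=None):
    content = "<br>\n".join(html.escape(x) for x in text.split('\n'))
    return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"

print(plaintext_to_html("first line\nsecond & third", classname="comments"))
# <p class='comments'>first line<br>
# second &amp; third</p>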
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index dff522ef..f3e4fba7 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -1,5 +1,5 @@
import json
-import os.path
+import os
import threading
import time
from datetime import datetime
@@ -513,14 +513,8 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text="
def preload_extensions_git_metadata():
- t0 = time.time()
for extension in extensions.extensions:
extension.read_info_from_repo()
- print(
- f"preload_extensions_git_metadata for "
- f"{len(extensions.extensions)} extensions took "
- f"{time.time() - t0:.2f}s"
- )
def create_ui():
@@ -570,7 +564,8 @@ def create_ui():
with gr.TabItem("Available", id="available"):
with gr.Row():
refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary")
- available_extensions_index = gr.Text(value="https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui-extensions/master/index.json", label="Extension index URL").style(container=False)
+ extensions_index_url = os.environ.get('WEBUI_EXTENSIONS_INDEX', "https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui-extensions/master/index.json")
+ available_extensions_index = gr.Text(value=extensions_index_url, label="Extension index URL").style(container=False)
extension_to_install = gr.Text(elem_id="extension_to_install", visible=False)
install_extension_button = gr.Button(elem_id="install_extension_button", visible=False)
diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py
index 693cafb6..49612298 100644
--- a/modules/ui_extra_networks.py
+++ b/modules/ui_extra_networks.py
@@ -2,14 +2,16 @@ import os.path
import urllib.parse
from pathlib import Path
-from modules import shared
+from modules import shared, ui_extra_networks_user_metadata, errors
from modules.images import read_info_from_image, save_image_with_geninfo
from modules.ui import up_down_symbol
import gradio as gr
import json
import html
+from fastapi.exceptions import HTTPException
from modules.generation_parameters_copypaste import image_from_url_text
+from modules.ui_components import ToolButton
extra_pages = []
allowed_dirs = set()
@@ -26,6 +28,9 @@ def register_page(page):
def fetch_file(filename: str = ""):
from starlette.responses import FileResponse
+ if not os.path.isfile(filename):
+ raise HTTPException(status_code=404, detail="File not found")
+
if not any(Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs):
raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.")
@@ -48,25 +53,71 @@ def get_metadata(page: str = "", item: str = ""):
if metadata is None:
return JSONResponse({})
- return JSONResponse({"metadata": metadata})
+ return JSONResponse({"metadata": json.dumps(metadata, indent=4, ensure_ascii=False)})
+
+
+def get_single_card(page: str = "", tabname: str = "", name: str = ""):
+ from starlette.responses import JSONResponse
+
+ page = next(iter([x for x in extra_pages if x.name == page]), None)
+
+ try:
+ item = page.create_item(name, enable_filter=False)
+ page.items[name] = item
+ except Exception as e:
+ errors.display(e, "creating item for extra network")
+ item = page.items.get(name)
+
+ page.read_user_metadata(item)
+ item_html = page.create_html_for_item(item, tabname)
+
+ return JSONResponse({"html": item_html})
def add_pages_to_demo(app):
app.add_api_route("/sd_extra_networks/thumb", fetch_file, methods=["GET"])
app.add_api_route("/sd_extra_networks/metadata", get_metadata, methods=["GET"])
+ app.add_api_route("/sd_extra_networks/get-single-card", get_single_card, methods=["GET"])
+
+
+def quote_js(s):
+ s = s.replace('\\', '\\\\')
+ s = s.replace('"', '\\"')
+ return f'"{s}"'
class ExtraNetworksPage:
def __init__(self, title):
self.title = title
self.name = title.lower()
+ self.id_page = self.name.replace(" ", "_")
self.card_page = shared.html("extra-networks-card.html")
self.allow_negative_prompt = False
self.metadata = {}
+ self.items = {}
def refresh(self):
pass
+ def read_user_metadata(self, item):
+ filename = item.get("filename", None)
+ basename, ext = os.path.splitext(filename)
+ metadata_filename = basename + '.json'
+
+ metadata = {}
+ try:
+ if os.path.isfile(metadata_filename):
+ with open(metadata_filename, "r", encoding="utf8") as file:
+ metadata = json.load(file)
+ except Exception as e:
+ errors.display(e, f"reading extra network user metadata from {metadata_filename}")
+
+ desc = metadata.get("description", None)
+ if desc is not None:
+ item["description"] = desc
+
+ item["user_metadata"] = metadata
+
def link_preview(self, filename):
quoted_filename = urllib.parse.quote(filename.replace('\\', '/'))
mtime = os.path.getmtime(filename)
@@ -83,7 +134,6 @@ class ExtraNetworksPage:
return ""
def create_html(self, tabname):
- view = shared.opts.extra_networks_default_view
items_html = ''
self.metadata = {}
@@ -119,11 +169,15 @@ class ExtraNetworksPage:
</button>
""" for subdir in subdirs])
- for item in self.list_items():
+ self.items = {x["name"]: x for x in self.list_items()}
+ for item in self.items.values():
metadata = item.get("metadata")
if metadata:
self.metadata[item["name"]] = metadata
+ if "user_metadata" not in item:
+ self.read_user_metadata(item)
+
items_html += self.create_html_for_item(item, tabname)
if items_html == '':
@@ -133,16 +187,19 @@ class ExtraNetworksPage:
self_name_id = self.name.replace(" ", "_")
res = f"""
-<div id='{tabname}_{self_name_id}_subdirs' class='extra-network-subdirs extra-network-subdirs-{view}'>
+<div id='{tabname}_{self_name_id}_subdirs' class='extra-network-subdirs extra-network-subdirs-cards'>
{subdirs_html}
</div>
-<div id='{tabname}_{self_name_id}_cards' class='extra-network-{view}'>
+<div id='{tabname}_{self_name_id}_cards' class='extra-network-cards'>
{items_html}
</div>
"""
return res
+ def create_item(self, name, index=None):
+ raise NotImplementedError()
+
def list_items(self):
raise NotImplementedError()
@@ -158,7 +215,7 @@ class ExtraNetworksPage:
onclick = item.get("onclick", None)
if onclick is None:
- onclick = '"' + html.escape(f"""return cardClicked({json.dumps(tabname)}, {item["prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"'
+ onclick = '"' + html.escape(f"""return cardClicked({quote_js(tabname)}, {item["prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"'
height = f"height: {shared.opts.extra_networks_card_height}px;" if shared.opts.extra_networks_card_height else ''
width = f"width: {shared.opts.extra_networks_card_width}px;" if shared.opts.extra_networks_card_width else ''
@@ -166,7 +223,9 @@ class ExtraNetworksPage:
metadata_button = ""
metadata = item.get("metadata")
if metadata:
- metadata_button = f"<div class='metadata-button' title='Show metadata' onclick='extraNetworksRequestMetadata(event, {json.dumps(self.name)}, {json.dumps(item['name'])})'></div>"
+ metadata_button = f"<div class='metadata-button card-button' title='Show internal metadata' onclick='extraNetworksRequestMetadata(event, {quote_js(self.name)}, {quote_js(item['name'])})'></div>"
+
+ edit_button = f"<div class='edit-button card-button' title='Edit metadata' onclick='extraNetworksEditUserMetadata(event, {quote_js(tabname)}, {quote_js(self.id_page)}, {quote_js(item['name'])})'></div>"
local_path = ""
filename = item.get("filename", "")
@@ -190,16 +249,17 @@ class ExtraNetworksPage:
args = {
"background_image": background_image,
- "style": f"'display: none; {height}{width}'",
+ "style": f"'display: none; {height}{width}; font-size: {shared.opts.extra_networks_card_text_scale*100}%'",
"prompt": item.get("prompt", None),
- "tabname": json.dumps(tabname),
- "local_preview": json.dumps(item["local_preview"]),
+ "tabname": quote_js(tabname),
+ "local_preview": quote_js(item["local_preview"]),
"name": item["name"],
- "description": (item.get("description") or ""),
+ "description": (item.get("description") or "" if shared.opts.extra_networks_card_show_desc else ""),
"card_clicked": onclick,
- "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {json.dumps(tabname)}, {json.dumps(item["local_preview"])})""") + '"',
+ "save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {quote_js(tabname)}, {quote_js(item["local_preview"])})""") + '"',
"search_term": item.get("search_term", ""),
"metadata_button": metadata_button,
+ "edit_button": edit_button,
"search_only": " search_only" if search_only else "",
"sort_keys": sort_keys,
}
@@ -247,6 +307,9 @@ class ExtraNetworksPage:
pass
return None
+ def create_user_metadata_editor(self, ui, tabname):
+ return ui_extra_networks_user_metadata.UserMetadataEditor(ui, tabname, self)
+
def initialize():
extra_pages.clear()
@@ -297,23 +360,26 @@ def create_ui(container, button, tabname):
ui = ExtraNetworksUi()
ui.pages = []
ui.pages_contents = []
+ ui.user_metadata_editors = []
ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy())
ui.tabname = tabname
with gr.Tabs(elem_id=tabname+"_extra_tabs"):
for page in ui.stored_extra_pages:
- page_id = page.title.lower().replace(" ", "_")
-
- with gr.Tab(page.title, id=page_id):
- elem_id = f"{tabname}_{page_id}_cards_html"
+ with gr.Tab(page.title, id=page.id_page):
+ elem_id = f"{tabname}_{page.id_page}_cards_html"
page_elem = gr.HTML('Loading...', elem_id=elem_id)
ui.pages.append(page_elem)
- page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + json.dumps(tabname) + '); return []}', inputs=[], outputs=[])
+ page_elem.change(fn=lambda: None, _js='function(){applyExtraNetworkFilter(' + quote_js(tabname) + '); return []}', inputs=[], outputs=[])
+
+ editor = page.create_user_metadata_editor(ui, tabname)
+ editor.create_ui()
+ ui.user_metadata_editors.append(editor)
gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False)
gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", multiselect=False, visible=False, show_label=False, interactive=True)
- gr.Button(up_down_symbol, elem_id=tabname+"_extra_sortorder")
+ ToolButton(up_down_symbol, elem_id=tabname+"_extra_sortorder")
button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh")
ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False)
@@ -363,6 +429,8 @@ def path_is_parent(parent_path, child_path):
def setup_ui(ui, gallery):
def save_preview(index, images, filename):
+ # this function is here for backwards compatibility and likely will be removed soon
+
if len(images) == 0:
print("There is no image in gallery to save as a preview.")
return [page.create_html(ui.tabname) for page in ui.stored_extra_pages]
@@ -394,3 +462,7 @@ def setup_ui(ui, gallery):
outputs=[*ui.pages]
)
+ for editor in ui.user_metadata_editors:
+ editor.setup_ui(gallery)
+
+
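quote_js replaces json.dumps when embedding strings into inline JavaScript handlers; unlike json.dumps it escapes only backslashes and double quotes, leaving everything else (including non-ASCII) untouched. A standalone check with an arbitrary sample string:

def quote_js(s):
    s = s.replace('\\', '\\\\')
    s = s.replace('"', '\\"')
    return f'"{s}"'

print(quote_js('C:\\models\\thing "v2"'))  # "C:\\models\\thing \"v2\""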
diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py
index 8b9ab71b..76780cfd 100644
--- a/modules/ui_extra_networks_checkpoints.py
+++ b/modules/ui_extra_networks_checkpoints.py
@@ -1,8 +1,8 @@
import html
-import json
import os
from modules import shared, ui_extra_networks, sd_models
+from modules.ui_extra_networks import quote_js
class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
@@ -12,21 +12,23 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
def refresh(self):
shared.refresh_checkpoints()
+ def create_item(self, name, index=None):
+ checkpoint: sd_models.CheckpointInfo = sd_models.checkpoint_aliases.get(name)
+ path, ext = os.path.splitext(checkpoint.filename)
+ return {
+ "name": checkpoint.name_for_extra,
+ "filename": checkpoint.filename,
+ "preview": self.find_preview(path),
+ "description": self.find_description(path),
+ "search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""),
+ "onclick": '"' + html.escape(f"""return selectCheckpoint({quote_js(name)})""") + '"',
+ "local_preview": f"{path}.{shared.opts.samples_format}",
+ "sort_keys": {'default': index, **self.get_sort_keys(checkpoint.filename)},
+ }
+
def list_items(self):
- checkpoint: sd_models.CheckpointInfo
- for index, (name, checkpoint) in enumerate(sd_models.checkpoints_list.items()):
- path, ext = os.path.splitext(checkpoint.filename)
- yield {
- "name": checkpoint.name_for_extra,
- "filename": path,
- "preview": self.find_preview(path),
- "description": self.find_description(path),
- "search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""),
- "onclick": '"' + html.escape(f"""return selectCheckpoint({json.dumps(name)})""") + '"',
- "local_preview": f"{path}.{shared.opts.samples_format}",
- "sort_keys": {'default': index, **self.get_sort_keys(checkpoint.filename)},
-
- }
+ for index, name in enumerate(sd_models.checkpoints_list):
+ yield self.create_item(name, index)
def allowed_directories_for_previews(self):
return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None]
diff --git a/modules/ui_extra_networks_hypernets.py b/modules/ui_extra_networks_hypernets.py
index 7c19b532..e53ccb42 100644
--- a/modules/ui_extra_networks_hypernets.py
+++ b/modules/ui_extra_networks_hypernets.py
@@ -1,7 +1,7 @@
-import json
import os
from modules import shared, ui_extra_networks
+from modules.ui_extra_networks import quote_js
class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
@@ -11,21 +11,24 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
def refresh(self):
shared.reload_hypernetworks()
+ def create_item(self, name, index=None):
+ full_path = shared.hypernetworks[name]
+ path, ext = os.path.splitext(full_path)
+
+ return {
+ "name": name,
+ "filename": full_path,
+ "preview": self.find_preview(path),
+ "description": self.find_description(path),
+ "search_term": self.search_terms_from_path(path),
+ "prompt": quote_js(f"<hypernet:{name}:") + " + opts.extra_networks_default_multiplier + " + quote_js(">"),
+ "local_preview": f"{path}.preview.{shared.opts.samples_format}",
+ "sort_keys": {'default': index, **self.get_sort_keys(path + ext)},
+ }
+
def list_items(self):
- for index, (name, path) in enumerate(shared.hypernetworks.items()):
- path, ext = os.path.splitext(path)
-
- yield {
- "name": name,
- "filename": path,
- "preview": self.find_preview(path),
- "description": self.find_description(path),
- "search_term": self.search_terms_from_path(path),
- "prompt": json.dumps(f"<hypernet:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
- "local_preview": f"{path}.preview.{shared.opts.samples_format}",
- "sort_keys": {'default': index, **self.get_sort_keys(path + ext)},
-
- }
+ for index, name in enumerate(shared.hypernetworks):
+ yield self.create_item(name, index)
def allowed_directories_for_previews(self):
return [shared.cmd_opts.hypernetwork_dir]
diff --git a/modules/ui_extra_networks_textual_inversion.py b/modules/ui_extra_networks_textual_inversion.py
index 58a61c55..d1794e50 100644
--- a/modules/ui_extra_networks_textual_inversion.py
+++ b/modules/ui_extra_networks_textual_inversion.py
@@ -1,7 +1,7 @@
-import json
import os
from modules import ui_extra_networks, sd_hijack, shared
+from modules.ui_extra_networks import quote_js
class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
@@ -12,20 +12,24 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
def refresh(self):
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)
+ def create_item(self, name, index=None):
+ embedding = sd_hijack.model_hijack.embedding_db.word_embeddings.get(name)
+
+ path, ext = os.path.splitext(embedding.filename)
+ return {
+ "name": name,
+ "filename": embedding.filename,
+ "preview": self.find_preview(path),
+ "description": self.find_description(path),
+ "search_term": self.search_terms_from_path(embedding.filename),
+ "prompt": quote_js(embedding.name),
+ "local_preview": f"{path}.preview.{shared.opts.samples_format}",
+ "sort_keys": {'default': index, **self.get_sort_keys(embedding.filename)},
+ }
+
def list_items(self):
- for index, embedding in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings.values()):
- path, ext = os.path.splitext(embedding.filename)
- yield {
- "name": embedding.name,
- "filename": embedding.filename,
- "preview": self.find_preview(path),
- "description": self.find_description(path),
- "search_term": self.search_terms_from_path(embedding.filename),
- "prompt": json.dumps(embedding.name),
- "local_preview": f"{path}.preview.{shared.opts.samples_format}",
- "sort_keys": {'default': index, **self.get_sort_keys(embedding.filename)},
-
- }
+ for index, name in enumerate(sd_hijack.model_hijack.embedding_db.word_embeddings):
+ yield self.create_item(name, index)
def allowed_directories_for_previews(self):
return list(sd_hijack.model_hijack.embedding_db.embedding_dirs)
diff --git a/modules/ui_extra_networks_user_metadata.py b/modules/ui_extra_networks_user_metadata.py
new file mode 100644
index 00000000..63d4b503
--- /dev/null
+++ b/modules/ui_extra_networks_user_metadata.py
@@ -0,0 +1,195 @@
+import datetime
+import html
+import json
+import os.path
+
+import gradio as gr
+
+from modules import generation_parameters_copypaste, images, sysinfo, errors
+
+
+class UserMetadataEditor:
+
+ def __init__(self, ui, tabname, page):
+ self.ui = ui
+ self.tabname = tabname
+ self.page = page
+ self.id_part = f"{self.tabname}_{self.page.id_page}_edit_user_metadata"
+
+ self.box = None
+
+ self.edit_name_input = None
+ self.button_edit = None
+
+ self.edit_name = None
+ self.edit_description = None
+ self.edit_notes = None
+ self.html_filedata = None
+ self.html_preview = None
+ self.html_status = None
+
+ self.button_cancel = None
+ self.button_replace_preview = None
+ self.button_save = None
+
+ def get_user_metadata(self, name):
+ item = self.page.items.get(name, {})
+
+ user_metadata = item.get('user_metadata', None)
+ if user_metadata is None:
+ user_metadata = {}
+ item['user_metadata'] = user_metadata
+
+ return user_metadata
+
+ def create_extra_default_items_in_left_column(self):
+ pass
+
+ def create_default_editor_elems(self):
+ with gr.Row():
+ with gr.Column(scale=2):
+ self.edit_name = gr.HTML(elem_classes="extra-network-name")
+ self.edit_description = gr.Textbox(label="Description", lines=4)
+ self.html_filedata = gr.HTML()
+
+ self.create_extra_default_items_in_left_column()
+
+ with gr.Column(scale=1, min_width=0):
+ self.html_preview = gr.HTML()
+
+ def create_default_buttons(self):
+
+ with gr.Row(elem_classes="edit-user-metadata-buttons"):
+ self.button_cancel = gr.Button('Cancel')
+ self.button_replace_preview = gr.Button('Replace preview', variant='primary')
+ self.button_save = gr.Button('Save', variant='primary')
+
+ self.html_status = gr.HTML(elem_classes="edit-user-metadata-status")
+
+ self.button_cancel.click(fn=None, _js="closePopup")
+
+ def get_card_html(self, name):
+ item = self.page.items.get(name, {})
+
+ preview_url = item.get("preview", None)
+
+ if not preview_url:
+ filename, _ = os.path.splitext(item["filename"])
+ preview_url = self.page.find_preview(filename)
+ item["preview"] = preview_url
+
+ if preview_url:
+ preview = f'''
+ <div class='card standalone-card-preview'>
+ <img src="{html.escape(preview_url)}" class="preview">
+ </div>
+ '''
+ else:
+ preview = "<div class='card standalone-card-preview'></div>"
+
+ return preview
+
+ def get_metadata_table(self, name):
+ item = self.page.items.get(name, {})
+ try:
+ filename = item["filename"]
+
+ stats = os.stat(filename)
+ params = [
+ ('File size: ', sysinfo.pretty_bytes(stats.st_size)),
+ ('Modified: ', datetime.datetime.fromtimestamp(stats.st_mtime).strftime('%Y-%m-%d %H:%M')),
+ ]
+
+ return params
+ except Exception as e:
+ errors.display(e, f"reading info for {name}")
+ return []
+
+ def put_values_into_components(self, name):
+ user_metadata = self.get_user_metadata(name)
+
+ try:
+ params = self.get_metadata_table(name)
+ except Exception as e:
+ errors.display(e, f"reading metadata info for {name}")
+ params = []
+
+ table = '<table class="file-metadata">' + "".join(f"<tr><th>{name}</th><td>{value}</td></tr>" for name, value in params) + '</table>'
+
+ return html.escape(name), user_metadata.get('description', ''), table, self.get_card_html(name), user_metadata.get('notes', '')
+
+ def write_user_metadata(self, name, metadata):
+ item = self.page.items.get(name, {})
+ filename = item.get("filename", None)
+ basename, ext = os.path.splitext(filename)
+
+ with open(basename + '.json', "w", encoding="utf8") as file:
+ json.dump(metadata, file)
+
+ def save_user_metadata(self, name, desc, notes):
+ user_metadata = self.get_user_metadata(name)
+ user_metadata["description"] = desc
+ user_metadata["notes"] = notes
+
+ self.write_user_metadata(name, user_metadata)
+
+ def setup_save_handler(self, button, func, components):
+ button\
+ .click(fn=func, inputs=[self.edit_name_input, *components], outputs=[])\
+ .then(fn=None, _js="function(name){closePopup(); extraNetworksRefreshSingleCard(" + json.dumps(self.page.name) + "," + json.dumps(self.tabname) + ", name);}", inputs=[self.edit_name_input], outputs=[])
+
+ def create_editor(self):
+ self.create_default_editor_elems()
+
+ self.edit_notes = gr.TextArea(label='Notes', lines=4)
+
+ self.create_default_buttons()
+
+ self.button_edit\
+ .click(fn=self.put_values_into_components, inputs=[self.edit_name_input], outputs=[self.edit_name, self.edit_description, self.html_filedata, self.html_preview, self.edit_notes])\
+ .then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[self.box])
+
+ self.setup_save_handler(self.button_save, self.save_user_metadata, [self.edit_description, self.edit_notes])
+
+ def create_ui(self):
+ with gr.Box(visible=False, elem_id=self.id_part, elem_classes="edit-user-metadata") as box:
+ self.box = box
+
+ self.edit_name_input = gr.Textbox("Edit user metadata card id", visible=False, elem_id=f"{self.id_part}_name")
+ self.button_edit = gr.Button("Edit user metadata", visible=False, elem_id=f"{self.id_part}_button")
+
+ self.create_editor()
+
+ def save_preview(self, index, gallery, name):
+ if len(gallery) == 0:
+ return self.get_card_html(name), "There is no image in gallery to save as a preview."
+
+ item = self.page.items.get(name, {})
+
+ index = int(index)
+ index = 0 if index < 0 else index
+ index = len(gallery) - 1 if index >= len(gallery) else index
+
+ img_info = gallery[index if index >= 0 else 0]
+ image = generation_parameters_copypaste.image_from_url_text(img_info)
+ geninfo, items = images.read_info_from_image(image)
+
+ images.save_image_with_geninfo(image, geninfo, item["local_preview"])
+
+ return self.get_card_html(name), ''
+
+ def setup_ui(self, gallery):
+ self.button_replace_preview.click(
+ fn=self.save_preview,
+ _js="function(x, y, z){return [selected_gallery_index(), y, z]}",
+ inputs=[self.edit_name_input, gallery, self.edit_name_input],
+ outputs=[self.html_preview, self.html_status]
+ ).then(
+ fn=None,
+ _js="function(name){extraNetworksRefreshSingleCard(" + json.dumps(self.page.name) + "," + json.dumps(self.tabname) + ", name);}",
+ inputs=[self.edit_name_input],
+ outputs=[]
+ )
+
+
+
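A page opts into this editor through create_user_metadata_editor(); an extension page could subclass it to add its own fields, roughly as sketched below. The subclass name and the activation-text field are illustrative assumptions, not taken from the diff:

import gradio as gr
from modules import ui_extra_networks_user_metadata

class ExampleMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor):
    def __init__(self, ui, tabname, page):
        super().__init__(ui, tabname, page)
        self.edit_activation_text = None  # hypothetical extra field

    def create_extra_default_items_in_left_column(self):
        # extra widgets placed under the description box in the left column
        self.edit_activation_text = gr.Text(label='Activation text')

A complete subclass would also override save_user_metadata() and pass its extra components to setup_save_handler(), the same way the base editor wires up description and notes.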