From 44c27ebc7393ea793245aa565ace6c9bf1313980 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Mon, 10 Jul 2023 20:08:23 +0300
Subject: Use closing() with processing classes everywhere

Follows up on #11569
---
 modules/textual_inversion/textual_inversion.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index bb6f211c..cbe975b7 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -1,5 +1,6 @@
 import os
 from collections import namedtuple
+from contextlib import closing

 import torch
 import tqdm
@@ -584,8 +585,9 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st

                     preview_text = p.prompt

-                    processed = processing.process_images(p)
-                    image = processed.images[0] if len(processed.images) > 0 else None
+                    with closing(p):
+                        processed = processing.process_images(p)
+                    image = processed.images[0] if len(processed.images) > 0 else None

                     if unload:
                         shared.sd_model.first_stage_model.to(devices.cpu)
--
cgit v1.2.1


From 2b1bae0d755c2d5201f6a6aadeadb5588208d43f Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 15 Jul 2023 08:41:22 +0300
Subject: add textual inversion hashes to infotext

---
 modules/textual_inversion/textual_inversion.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index cbe975b7..38e072a8 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -13,7 +13,7 @@ import numpy as np
 from PIL import Image, PngImagePlugin
 from torch.utils.tensorboard import SummaryWriter

-from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors
+from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors, hashes

 import modules.textual_inversion.dataset
 from modules.textual_inversion.learn_schedule import LearnRateScheduler
@@ -49,6 +49,8 @@ class Embedding:
         self.sd_checkpoint_name = None
         self.optimizer_state_dict = None
         self.filename = None
+        self.hash = None
+        self.shorthash = None

     def save(self, filename):
         embedding_data = {
@@ -82,6 +84,10 @@ class Embedding:
             self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
         return self.cached_checksum

+    def set_hash(self, v):
+        self.hash = v
+        self.shorthash = self.hash[0:12]
+

 class DirWithTextualInversionEmbeddings:
     def __init__(self, path):
@@ -199,6 +205,7 @@ class EmbeddingDatabase:
         embedding.vectors = vec.shape[0]
         embedding.shape = vec.shape[-1]
         embedding.filename = path
+        embedding.set_hash(hashes.sha256(embedding.filename, "textual_inversion/" + name) or '')

         if self.expected_shape == -1 or self.expected_shape == embedding.shape:
             self.register_embedding(embedding, shared.sd_model)
--
cgit v1.2.1


From 0aa8d538e147ba87df36d8196845807c8fa3f4e1 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Sat, 15 Jul 2023 09:24:22 +0300
Subject: suppress printing TI embedding into console by default

---
 modules/textual_inversion/textual_inversion.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 38e072a8..6166c76f 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -256,7 +256,7 @@ class EmbeddingDatabase:
         self.word_embeddings.update(sorted_word_embeddings)

         displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys()))
-        if self.previously_displayed_embeddings != displayed_embeddings:
+        if shared.opts.textual_inversion_print_at_load and self.previously_displayed_embeddings != displayed_embeddings:
             self.previously_displayed_embeddings = displayed_embeddings
             print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}")
             if self.skipped_embeddings:
--
cgit v1.2.1
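
Note on the closing() change in the first patch: contextlib.closing turns any object that has a close() method into a context manager, so p.close() is called even if process_images raises. A minimal, self-contained sketch of the pattern follows; the Processing class and process_images function here are illustrative placeholders, not the webui's actual implementations.

from contextlib import closing


class Processing:
    """Stand-in for an object that holds resources and exposes close()."""
    def close(self):
        print("releasing samplers and cached tensors")


def process_images(p):
    print("rendering preview image")


p = Processing()
with closing(p):         # closing(p) is a context manager that calls p.close() on exit
    process_images(p)    # close() runs even if this call raises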
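
Note on the hash change in the second patch: Embedding.set_hash stores the full hash plus a 12-character shorthash truncated from it. A rough standard-library sketch of computing such a pair for an embedding file is below; the file name is hypothetical, and the webui's hashes.sha256 helper may differ in details such as result caching.

import hashlib


def file_sha256(path):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in chunks so large embedding files need not fit in memory at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()


full_hash = file_sha256("my-embedding.pt")  # hypothetical embedding file path
shorthash = full_hash[0:12]                 # same truncation as Embedding.set_hash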