From 1618df41bad092e068c61bf510b1e20856821ad5 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Fri, 28 Oct 2022 10:31:27 +0700
Subject: Gradient clipping for textual embedding

---
 modules/textual_inversion/textual_inversion.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index ff002d3e..7bad73a6 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -206,7 +206,7 @@ def write_loss(log_directory, filename, step, epoch_len, values):
     })
 
 
-def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, clip_grad_mode, clip_grad_value, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     assert embedding_name, 'embedding not selected'
 
     shared.state.textinfo = "Initializing textual inversion training..."
@@ -256,6 +256,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
     if ititial_step > steps:
         return embedding, filename
 
+    clip_grad_mode_value = clip_grad_mode == "value"
+    clip_grad_mode_norm = clip_grad_mode == "norm"
+
     scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
     optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
 
@@ -280,6 +283,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
 
             optimizer.zero_grad()
             loss.backward()
+
+            if clip_grad_mode_value:
+                torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_value)
+            elif clip_grad_mode_norm:
+                torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_value)
+
             optimizer.step()
--
cgit v1.2.1

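The patch above routes two new parameters (`clip_grad_mode`, `clip_grad_value`) into the training loop and applies the matching `torch.nn.utils` clipping function between `loss.backward()` and `optimizer.step()`. A minimal, self-contained sketch of that same pattern outside the webui (the tensor, loss, and threshold below are illustrative stand-ins for `embedding.vec` and the real training loss):

```python
import torch

# Stand-in for embedding.vec: one trainable tensor driven by AdamW.
vec = torch.randn(8, 768, requires_grad=True)
optimizer = torch.optim.AdamW([vec], lr=5e-3)

clip_grad_mode = "norm"   # "value", "norm", or anything else to disable (illustrative)
clip_grad_value = 0.1     # per-element bound in "value" mode, L2 bound in "norm" mode

loss = (vec ** 2).mean()  # stand-in loss
optimizer.zero_grad()
loss.backward()

if clip_grad_mode == "value":
    # Clamp every gradient element into [-clip_grad_value, clip_grad_value].
    torch.nn.utils.clip_grad_value_(vec, clip_value=clip_grad_value)
elif clip_grad_mode == "norm":
    # Rescale the whole gradient so its L2 norm is at most clip_grad_value.
    torch.nn.utils.clip_grad_norm_(vec, max_norm=clip_grad_value)

optimizer.step()
```

Clipping by value bounds each gradient component independently, while clipping by norm keeps the gradient's direction and only shrinks its length, which is why the two modes are kept as separate options in the patch.
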
From 16451ca573220e49f2eaaab97580b6b91287c8c4 Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Fri, 28 Oct 2022 17:16:23 +0700
Subject: Learning rate sched syntax support for grad clipping

---
 modules/textual_inversion/textual_inversion.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 7bad73a6..6b00c6a1 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -255,9 +255,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
     ititial_step = embedding.step or 0
     if ititial_step > steps:
         return embedding, filename
-
+
     clip_grad_mode_value = clip_grad_mode == "value"
     clip_grad_mode_norm = clip_grad_mode == "norm"
+    clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm
+    if clip_grad_enabled:
+        clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False)
 
     scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
     optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
@@ -273,6 +276,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
         if shared.state.interrupted:
             break
 
+        if clip_grad_enabled:
+            clip_grad_sched.step(embedding.step)
+
         with torch.autocast("cuda"):
             c = cond_model([entry.cond_text for entry in entries])
             x = torch.stack([entry.latent for entry in entries]).to(devices.device)
@@ -285,9 +291,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
             loss.backward()
 
             if clip_grad_mode_value:
-                torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_value)
+                torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_sched.learn_rate)
             elif clip_grad_mode_norm:
-                torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_value)
+                torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_sched.learn_rate)
 
             optimizer.step()
--
cgit v1.2.1

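This change reuses `LearnRateScheduler`, so the clipping threshold can be written with the same `value:step` schedule syntax as the learning rate (for example, a loose clip early in training and a tighter one later), and it is stepped once per iteration before the gradients are clipped. A rough, illustrative sketch of how such a schedule string can be interpreted — this is not the webui's actual `LearnRateScheduler`, just the idea:

```python
def parse_schedule(text):
    """Parse "value:step, value:step, value" into (value, last_step) pairs.

    Illustrative only; the real scheduler supports more syntax than this.
    """
    pairs = []
    for chunk in text.split(","):
        if ":" in chunk:
            value, step = chunk.split(":")
            pairs.append((float(value), int(step)))
        else:
            pairs.append((float(chunk), None))  # applies until the end of training
    return pairs


def value_at(pairs, step):
    # Return the scheduled value that is active at the given training step.
    for value, last_step in pairs:
        if last_step is None or step <= last_step:
            return value
    return pairs[-1][0]


# Example: clip at 0.5 for the first 500 steps, then at 0.1 for the rest of training.
schedule = parse_schedule("0.5:500, 0.1")
assert value_at(schedule, 100) == 0.5
assert value_at(schedule, 2000) == 0.1
```
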
From cffc240a7327ae60671ff533469fc4ed4bf605de Mon Sep 17 00:00:00 2001
From: Nerogar
Date: Sun, 23 Oct 2022 14:05:25 +0200
Subject: fixed textual inversion training with inpainting models

---
 modules/textual_inversion/textual_inversion.py | 27 +++++++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 0aeb0459..2630c7c9 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -224,6 +224,26 @@ def validate_train_inputs(model_name, learn_rate, batch_size, data_root, templat
     if save_model_every or create_image_every:
         assert log_directory, "Log directory is empty"
 
+def create_dummy_mask(x, width=None, height=None):
+    if shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}:
+
+        # The "masked-image" in this case will just be all zeros since the entire image is masked.
+        image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
+        image_conditioning = shared.sd_model.get_first_stage_encoding(shared.sd_model.encode_first_stage(image_conditioning))
+
+        # Add the fake full 1s mask to the first dimension.
+        image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+        image_conditioning = image_conditioning.to(x.dtype)
+
+    else:
+        # Dummy zero conditioning if we're not using inpainting model.
+        # Still takes up a bit of memory, but no encoder call.
+        # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
+        image_conditioning = torch.zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
+
+    return image_conditioning
+
+
 def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     save_embedding_every = save_embedding_every or 0
     create_image_every = create_image_every or 0
@@ -286,6 +306,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
     forced_filename = "<none>"
     embedding_yet_to_be_embedded = False
 
+    img_c = None
     pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step)
     for i, entries in pbar:
         embedding.step = i + ititial_step
@@ -299,8 +320,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
 
         with torch.autocast("cuda"):
             c = cond_model([entry.cond_text for entry in entries])
+
+            if img_c is None:
+                img_c = create_dummy_mask(c, training_width, training_height)
+
             x = torch.stack([entry.latent for entry in entries]).to(devices.device)
-            loss = shared.sd_model(x, c)[0]
+            cond = {"c_concat": [img_c], "c_crossattn": [c]}
+            loss = shared.sd_model(x, cond)[0]
             del x
 
             losses[embedding.step % losses.shape[0]] = loss.item()
--
cgit v1.2.1

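Inpainting checkpoints (conditioning key `'hybrid'` or `'concat'`) expect an extra image conditioning tensor concatenated with the latent: one mask channel plus the four latent channels of the masked image, i.e. five channels in total. The patch builds an "everything is masked, masked image is empty" dummy of that shape once and reuses it for every batch. A standalone sketch of just the shape logic, with plain zeros standing in for the VAE-encoded masked image (names and sizes are illustrative):

```python
import torch

def dummy_inpainting_conditioning(batch_size, latent_h, latent_w, dtype=torch.float32):
    """Build a fully-masked dummy conditioning of shape (B, 5, H, W).

    Channel 0 is the mask (all ones = everything masked); channels 1-4 stand in
    for the VAE latent of an all-zero masked image, which the real code obtains
    from shared.sd_model.encode_first_stage().
    """
    masked_image_latent = torch.zeros(batch_size, 4, latent_h, latent_w, dtype=dtype)
    # F.pad with (0, 0, 0, 0, 1, 0) adds one all-ones plane at the front of the channel dim.
    return torch.nn.functional.pad(masked_image_latent, (0, 0, 0, 0, 1, 0), value=1.0)

cond = dummy_inpainting_conditioning(batch_size=2, latent_h=64, latent_w=64)
print(cond.shape)        # torch.Size([2, 5, 64, 64])
print(cond[:, 0].min())  # tensor(1.) -> the mask channel is all ones
```

The non-inpainting branch in the patch keeps the same five-channel layout but shrinks it to 1x1 zeros, since a standard model never reads it and only the batch dimension has to match.
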
From bb832d7725187f8a8ab44faa6ee1b38cb5f600aa Mon Sep 17 00:00:00 2001
From: Muhammad Rizqi Nur
Date: Sat, 5 Nov 2022 11:48:38 +0700
Subject: Simplify grad clip

---
 modules/textual_inversion/textual_inversion.py | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index c567ec3f..687d97bb 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -269,10 +269,10 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
     scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
 
-    clip_grad_mode_value = clip_grad_mode == "value"
-    clip_grad_mode_norm = clip_grad_mode == "norm"
-    clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm
-    if clip_grad_enabled:
+    clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \
+        torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
+        None
+    if clip_grad:
         clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False)
 
     # dataset loading may take a while, so input validations and early returns should be done before this
     shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
@@ -302,7 +302,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
         if shared.state.interrupted:
             break
 
-        if clip_grad_enabled:
+        if clip_grad:
             clip_grad_sched.step(embedding.step)
 
         with torch.autocast("cuda"):
@@ -316,10 +316,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
 
             optimizer.zero_grad()
             loss.backward()
 
-            if clip_grad_mode_value:
-                torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_sched.learn_rate)
-            elif clip_grad_mode_norm:
-                torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_sched.learn_rate)
+            if clip_grad:
+                clip_grad(embedding.vec, clip_grad_sched.learn_rate)
 
             optimizer.step()
--
cgit v1.2.1

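The refactor works because both clipping helpers take the parameters first and the threshold as the second positional argument (`clip_value` for `clip_grad_value_`, `max_norm` for `clip_grad_norm_`), so one callable plus one scheduled number replaces the two branches. A small illustrative sketch of the same selection pattern:

```python
import torch

vec = torch.randn(4, 768, requires_grad=True)
vec.grad = torch.randn_like(vec)  # pretend loss.backward() already ran

clip_grad_mode = "norm"  # illustrative setting: "value", "norm", or "disabled"

# Pick the clipping function once; None disables clipping entirely.
clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \
    torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \
    None

if clip_grad:
    # The threshold is passed positionally, so the same call works for either function.
    clip_grad(vec, 0.1)
    print(vec.grad.norm())  # <= 0.1 in "norm" mode
```
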
checkpoint") + else: + print("No saved optimizer exists in checkpoint") + + scaler = torch.cuda.amp.GradScaler() batch_size = ds.batch_size @@ -366,9 +391,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ # Before saving, change name to match current checkpoint. embedding_name_every = f'{embedding_name}-{steps_done}' last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt') - #if shared.opts.save_optimizer_state: - #embedding.optimizer_state_dict = optimizer.state_dict() - save_embedding(embedding, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True) + save_embedding(embedding, optimizer, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True) embedding_yet_to_be_embedded = True write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, steps_per_epoch, { @@ -458,7 +481,7 @@ Last saved image: {html.escape(last_saved_image)}

""" filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') - save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True) + save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True) except Exception: print(traceback.format_exc(), file=sys.stderr) pass @@ -470,7 +493,7 @@ Last saved image: {html.escape(last_saved_image)}
From 192ddc04d6de0d780f73aa5fbaa8c66cd4642e1c Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Tue, 3 Jan 2023 10:34:51 -0500
Subject: add job info to modules

---
 modules/textual_inversion/textual_inversion.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index fd253477..2c1251d6 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -245,6 +245,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
     create_image_every = create_image_every or 0
     validate_train_inputs(embedding_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_embedding_every, create_image_every, log_directory, name="embedding")
 
+    shared.state.job = "train-embedding"
     shared.state.textinfo = "Initializing textual inversion training..."
     shared.state.job_count = steps
--
cgit v1.2.1

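The job-info patch above is a small piece of progress-UI bookkeeping; the more substantial change remains the earlier optimizer-state patch. Persisting the AdamW state (per-parameter first and second moments plus step counts) next to the embedding means a resumed run continues where it left off instead of restarting the optimizer cold, and the stored hash ties each `.optim` file to the embedding weights it belongs to. A minimal sketch of the same save/resume round trip, with an illustrative checksum standing in for the webui's `embedding.checksum()` (file paths and helper names below are made up for the example):

```python
import os
import tempfile
import torch


def checksum(t):
    # Stand-in for the embedding checksum used in the patch above.
    return hash(tuple(t.detach().flatten().tolist()))


def save_with_optimizer(vec, optimizer, path):
    torch.save({'emb': vec.detach().cpu()}, path)
    torch.save({'hash': checksum(vec), 'optimizer_state_dict': optimizer.state_dict()}, path + '.optim')


def try_resume_optimizer(vec, optimizer, path):
    if not os.path.exists(path + '.optim'):
        return False
    saved = torch.load(path + '.optim', map_location='cpu')
    if saved.get('hash') != checksum(vec):
        return False  # the .optim file belongs to a different embedding; ignore it
    optimizer.load_state_dict(saved['optimizer_state_dict'])
    return True


vec = torch.randn(2, 768, requires_grad=True)
opt = torch.optim.AdamW([vec], lr=5e-3, weight_decay=0.0)
path = os.path.join(tempfile.gettempdir(), 'example-embedding.pt')
save_with_optimizer(vec, opt, path)
print(try_resume_optimizer(vec, opt, path))  # True
```

The file-extension check added to `process_file` is the other half of that change: once `.optim` files live in the embeddings directory, the loader has to skip anything that is not a `.pt`/`.bin` embedding or an embedded image.
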
From 184e670126f5fc50ba56fa0fedcf0cf60e45ed7e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 17:45:01 +0300
Subject: fix the merge

---
 modules/textual_inversion/textual_inversion.py | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 5421a758..8731ea5d 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -251,6 +251,7 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat
     if save_model_every or create_image_every:
         assert log_directory, "Log directory is empty"
 
+
 def create_dummy_mask(x, width=None, height=None):
     if shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}:
 
@@ -380,17 +381,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
                     break
 
                 with devices.autocast():
-                    # c = stack_conds(batch.cond).to(devices.device)
-                    # mask = torch.tensor(batch.emb_index).to(devices.device, non_blocking=pin_memory)
-                    # print(mask)
-                    # c[:, 1:1+embedding.vec.shape[0]] = embedding.vec.to(devices.device, non_blocking=pin_memory)
-
-
-                    if img_c is None:
-                        img_c = create_dummy_mask(c, training_width, training_height)
-
                     x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
                     c = shared.sd_model.cond_stage_model(batch.cond_text)
+
+                    if img_c is None:
+                        img_c = create_dummy_mask(c, training_width, training_height)
+
                     cond = {"c_concat": [img_c], "c_crossattn": [c]}
                     loss = shared.sd_model(x, cond)[0] / gradient_step
                     del x
--
cgit v1.2.1

From 525cea924562afd676f55470095268a0f6fca59e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 4 Jan 2023 17:58:07 +0300
Subject: use shared function from processing for creating dummy mask when training inpainting model

---
 modules/textual_inversion/textual_inversion.py | 33 +++++++------------------
 1 file changed, 9 insertions(+), 24 deletions(-)

(limited to 'modules/textual_inversion/textual_inversion.py')

diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 8731ea5d..2250e41b 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -252,26 +252,6 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat
         assert log_directory, "Log directory is empty"
 
 
-def create_dummy_mask(x, width=None, height=None):
-    if shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}:
-
-        # The "masked-image" in this case will just be all zeros since the entire image is masked.
-        image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
-        image_conditioning = shared.sd_model.get_first_stage_encoding(shared.sd_model.encode_first_stage(image_conditioning))
-
-        # Add the fake full 1s mask to the first dimension.
-        image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
-        image_conditioning = image_conditioning.to(x.dtype)
-
-    else:
-        # Dummy zero conditioning if we're not using inpainting model.
-        # Still takes up a bit of memory, but no encoder call.
-        # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
-        image_conditioning = torch.zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
-
-    return image_conditioning
-
-
 def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     save_embedding_every = save_embedding_every or 0
     create_image_every = create_image_every or 0
@@ -346,7 +326,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
         else:
             print("No saved optimizer exists in checkpoint")
 
-
     scaler = torch.cuda.amp.GradScaler()
 
     batch_size = ds.batch_size
@@ -362,7 +341,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
     forced_filename = "<none>"
     embedding_yet_to_be_embedded = False
 
+    is_training_inpainting_model = shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}
    img_c = None
+
    pbar = tqdm.tqdm(total=steps - initial_step)
    try:
        for i in range((steps-initial_step) * gradient_step):
@@ -384,10 +365,14 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
                    x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
                    c = shared.sd_model.cond_stage_model(batch.cond_text)
 
-                    if img_c is None:
-                        img_c = create_dummy_mask(c, training_width, training_height)
+                    if is_training_inpainting_model:
+                        if img_c is None:
+                            img_c = processing.txt2img_image_conditioning(shared.sd_model, c, training_width, training_height)
+
+                        cond = {"c_concat": [img_c], "c_crossattn": [c]}
+                    else:
+                        cond = c
 
-                    cond = {"c_concat": [img_c], "c_crossattn": [c]}
                    loss = shared.sd_model(x, cond)[0] / gradient_step
                    del x
 
--
cgit v1.2.1
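After this last change the training loop builds conditioning in two shapes: inpainting-style models (conditioning key `'hybrid'` or `'concat'`) receive a dict carrying both `c_concat` (the dummy image conditioning from `processing.txt2img_image_conditioning`) and `c_crossattn` (the text conditioning), while standard models receive the text conditioning unchanged. A schematic recap of that branch with plain tensors standing in for the webui objects (illustrative shapes, not the actual API):

```python
import torch

def build_cond(text_cond, image_cond, conditioning_key):
    """Mirror the branch added above, using plain tensors as stand-ins."""
    if conditioning_key in {'hybrid', 'concat'}:
        # Inpainting-style models consume both conditionings as a dict.
        return {"c_concat": [image_cond], "c_crossattn": [text_cond]}
    # Standard text-conditioned models just take the cross-attention conditioning.
    return text_cond

text_cond = torch.randn(2, 77, 768)      # stand-in for cond_stage_model(batch.cond_text)
image_cond = torch.zeros(2, 5, 64, 64)   # stand-in for txt2img_image_conditioning(...)

print(sorted(build_cond(text_cond, image_cond, 'hybrid').keys()))  # ['c_concat', 'c_crossattn']
print(build_cond(text_cond, image_cond, 'crossattn').shape)        # torch.Size([2, 77, 768])
```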