author     brkirch <brkirch@users.noreply.github.com>  2022-11-28 21:36:35 -0500
committer  brkirch <brkirch@users.noreply.github.com>  2022-11-30 10:33:42 -0500
commit     4d5f1691dda971ec7b461dd880426300fd54ccee
tree       c9ebed9f119ae66c13e716ff867a8c20108389a9 /modules/textual_inversion
parent     21effd629d0fdfdbbff2b20a9f4a3767e7e8bd33
Use devices.autocast instead of torch.autocast
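For context: torch.autocast("cuda") hard-codes the CUDA backend, so dataset preprocessing and embedding training can fail or misbehave when the webui runs on CPU or Apple's MPS. Routing every call site through devices.autocast() lets one helper pick the right behavior for the active device. Below is a minimal sketch of what such a helper could look like; the device/dtype globals and the fallback logic are assumptions for illustration, and the actual implementation in modules/devices.py may differ.

import contextlib

import torch

# Assumed module-level state; the real modules/devices.py selects these
# elsewhere based on hardware and command-line options.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if device.type == "cuda" else torch.float32

def autocast(disable=False):
    # Return a no-op context when autocast is disabled, when running in
    # full precision, or on backends (e.g. MPS in late 2022) that do not
    # support torch.autocast.
    if disable or dtype == torch.float32 or device.type not in ("cuda", "cpu"):
        return contextlib.nullcontext()

    return torch.autocast(device.type)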
Diffstat (limited to 'modules/textual_inversion')
-rw-r--r--  modules/textual_inversion/dataset.py           | 4 ++--
-rw-r--r--  modules/textual_inversion/textual_inversion.py | 2 +-
2 files changed, 3 insertions, 3 deletions
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py
index e5725f33..2dc64c3c 100644
--- a/modules/textual_inversion/dataset.py
+++ b/modules/textual_inversion/dataset.py
@@ -82,7 +82,7 @@ class PersonalizedBase(Dataset):
             torchdata = torch.from_numpy(npimage).permute(2, 0, 1).to(device=device, dtype=torch.float32)
             latent_sample = None
 
-            with torch.autocast("cuda"):
+            with devices.autocast():
                 latent_dist = model.encode_first_stage(torchdata.unsqueeze(dim=0))
 
             if latent_sampling_method == "once" or (latent_sampling_method == "deterministic" and not isinstance(latent_dist, DiagonalGaussianDistribution)):
@@ -101,7 +101,7 @@ class PersonalizedBase(Dataset):
             entry.cond_text = self.create_text(filename_text)
 
             if include_cond and not (self.tag_drop_out != 0 or self.shuffle_tags):
-                with torch.autocast("cuda"):
+                with devices.autocast():
                     entry.cond = cond_model([entry.cond_text]).to(devices.cpu).squeeze(0)
 
             self.dataset.append(entry)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 4eb75cb5..daf8d1b8 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -316,7 +316,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
                 if shared.state.interrupted:
                     break
 
-                with torch.autocast("cuda"):
+                with devices.autocast():
                     # c = stack_conds(batch.cond).to(devices.device)
                     # mask = torch.tensor(batch.emb_index).to(devices.device, non_blocking=pin_memory)
                     # print(mask)
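With the wrapper in place, call sites no longer name a backend. A hypothetical smoke test against the sketch above (autocast and device are the assumed names from that sketch, not the repository's confirmed internals):

x = torch.randn(1, 4, 64, 64, device=device)

with autocast():
    # Under CUDA this matmul runs in mixed precision; on CPU or MPS it
    # executes in the default dtype via the nullcontext fallback.
    y = x @ x.transpose(-1, -2)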