author      AUTOMATIC1111 <16777216c@gmail.com>    2023-08-04 08:05:21 +0300
committer   GitHub <noreply@github.com>            2023-08-04 08:05:21 +0300
commit      56c3f94ba30d76bf680db9bc765624b9a143d769 (patch)
tree        6924fc635fe13fe59911a285313308be2d9acdc7 /modules/sd_hijack.py
parent      952effa8b10dba2f2f7f2cf4191f987e3666e9f0 (diff)
parent      073c0ebba380acbd73be8262feba41212165ff84 (diff)

Merge branch 'dev' into dev

Diffstat (limited to 'modules/sd_hijack.py')
-rw-r--r--  modules/sd_hijack.py  10
1 file changed, 6 insertions, 4 deletions
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index f5615967..cfa5f0eb 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -197,7 +197,7 @@ class StableDiffusionModelHijack:
                    conditioner.embedders[i] = sd_hijack_clip.FrozenCLIPEmbedderForSDXLWithCustomWords(embedder, self)
                    text_cond_models.append(conditioner.embedders[i])
                if typename == 'FrozenOpenCLIPEmbedder2':
-                    embedder.model.token_embedding = EmbeddingsWithFixes(embedder.model.token_embedding, self)
+                    embedder.model.token_embedding = EmbeddingsWithFixes(embedder.model.token_embedding, self, textual_inversion_key='clip_g')
                    conditioner.embedders[i] = sd_hijack_open_clip.FrozenOpenCLIPEmbedder2WithCustomWords(embedder, self)
                    text_cond_models.append(conditioner.embedders[i])
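
The hunk above gives the second SDXL text encoder (FrozenOpenCLIPEmbedder2, the OpenCLIP ViT-bigG model) its own textual-inversion key. A minimal sketch of the idea, assuming an SDXL embedding stores one tensor per encoder under 'clip_l' and 'clip_g' keys (the shapes below are hypothetical, not taken from this commit):

import torch

# hypothetical SDXL textual-inversion payload: one tensor per text encoder
sdxl_vec = {
    'clip_l': torch.randn(2, 768),    # CLIP ViT-L vectors (first encoder)
    'clip_g': torch.randn(2, 1280),   # OpenCLIP ViT-bigG vectors (second encoder)
}

def pick_vectors(vec, textual_inversion_key='clip_l'):
    # mirrors the dict-aware lookup this commit adds to EmbeddingsWithFixes.forward
    return vec[textual_inversion_key] if isinstance(vec, dict) else vec

assert pick_vectors(sdxl_vec, 'clip_l').shape[-1] == 768
assert pick_vectors(sdxl_vec, 'clip_g').shape[-1] == 1280
assert pick_vectors(torch.randn(2, 768)).shape[-1] == 768   # SD1/SD2 embeddings stay plain tensors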
@@ -243,7 +243,7 @@ class StableDiffusionModelHijack:
        ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = sd_unet.UNetModel_forward

    def undo_hijack(self, m):
-        if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
+        if type(m.cond_stage_model) == sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords:
            m.cond_stage_model = m.cond_stage_model.wrapped

        elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords:
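
The undo_hijack fix matters because hijack() replaces m.cond_stage_model with a wrapper, so a type check against the wrapped xlmr class could not match afterwards. A minimal sketch of that wrap/unwrap pattern, with stand-in class names rather than the webui classes:

import torch

class Wrapped(torch.nn.Module):            # stands in for the original cond_stage_model
    pass

class WithCustomWords(torch.nn.Module):    # stands in for FrozenXLMREmbedderWithCustomWords
    def __init__(self, wrapped):
        super().__init__()
        self.wrapped = wrapped

cond_stage_model = WithCustomWords(Wrapped())      # state after hijack()
assert type(cond_stage_model) is not Wrapped       # the old check never matched this
assert type(cond_stage_model) is WithCustomWords   # the corrected check does
cond_stage_model = cond_stage_model.wrapped        # what undo_hijack restores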
@@ -292,10 +292,11 @@ class StableDiffusionModelHijack:
class EmbeddingsWithFixes(torch.nn.Module):
-    def __init__(self, wrapped, embeddings):
+    def __init__(self, wrapped, embeddings, textual_inversion_key='clip_l'):
        super().__init__()
        self.wrapped = wrapped
        self.embeddings = embeddings
+        self.textual_inversion_key = textual_inversion_key

    def forward(self, input_ids):
        batch_fixes = self.embeddings.fixes
@@ -309,7 +310,8 @@ class EmbeddingsWithFixes(torch.nn.Module):
        vecs = []
        for fixes, tensor in zip(batch_fixes, inputs_embeds):
            for offset, embedding in fixes:
-                emb = devices.cond_cast_unet(embedding.vec)
+                vec = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec
+                emb = devices.cond_cast_unet(vec)
                emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
                tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])
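
For context, the splice in the last two lines of the hunk replaces the token embeddings after the embedding's trigger position with the textual-inversion vectors while keeping the sequence length fixed. A minimal sketch of that arithmetic with made-up shapes:

import torch

tensor = torch.randn(77, 768)   # hypothetical per-token embeddings for one prompt
emb = torch.randn(4, 768)       # hypothetical textual-inversion vectors
offset = 10                     # position of the embedding's token in the prompt

emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])   # clip so the splice never runs past the end
spliced = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])
assert spliced.shape == tensor.shape                        # sequence length is preserved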