| field | value | date |
|---|---|---|
| author | pieresimakp <69743585+pieresimakp@users.noreply.github.com> | 2023-03-25 23:00:45 +0800 |
| committer | pieresimakp <69743585+pieresimakp@users.noreply.github.com> | 2023-03-25 23:00:45 +0800 |
| commit | e3b9d0e3e8adfb6214a1eb7acf450574f427ff9d (patch) | |
| tree | c9c64ad1f926df990fb2ce05c6eec063de195eec /modules/sd_hijack_optimizations.py | |
| parent | 771ea212de13711b494b082d8e94e79b17ac9d08 (diff) | |
| parent | 91ae48fd7e20c60d6374f340cac0939f56d87048 (diff) | |
Merge branch 'master' into img2img-detect-image-size
Diffstat (limited to 'modules/sd_hijack_optimizations.py')
-rw-r--r-- | modules/sd_hijack_optimizations.py | 4
1 file changed, 2 insertions, 2 deletions
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 2e307b5d..372555ff 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -337,7 +337,7 @@ def xformers_attention_forward(self, x, context=None, mask=None):
     dtype = q.dtype
     if shared.opts.upcast_attn:
-        q, k = q.float(), k.float()
+        q, k, v = q.float(), k.float(), v.float()

     out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=get_xformers_flash_attention_op(q, k, v))
@@ -372,7 +372,7 @@ def scaled_dot_product_attention_forward(self, x, context=None, mask=None):
     dtype = q.dtype
     if shared.opts.upcast_attn:
-        q, k = q.float(), k.float()
+        q, k, v = q.float(), k.float(), v.float()

     # the output of sdp = (batch, num_heads, seq_len, head_dim)
     hidden_states = torch.nn.functional.scaled_dot_product_attention(
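Both hunks make the same fix: with the upcast_attn option enabled, only q and k were being promoted to float32, leaving v in half precision, which can cause a dtype mismatch inside the attention call. For context, here is a minimal, self-contained sketch of the completed pattern; the helper name and tensor shapes are illustrative assumptions, not the webui code itself.

```python
import torch
import torch.nn.functional as F

def attention_with_upcast(q, k, v, upcast_attn=True):
    # Sketch of the upcast pattern from the diff above (hypothetical helper,
    # not part of modules/sd_hijack_optimizations.py).
    dtype = q.dtype
    if upcast_attn:
        # Promote all three tensors together; upcasting only q and k (the
        # pre-patch behaviour) would leave v in half precision.
        q, k, v = q.float(), k.float(), v.float()
    out = F.scaled_dot_product_attention(q, k, v)
    return out.to(dtype)  # cast back to the caller's original dtype

# Usage with half-precision inputs of shape (batch, heads, seq_len, head_dim).
q = torch.randn(1, 8, 16, 64, dtype=torch.float16)
k = torch.randn(1, 8, 16, 64, dtype=torch.float16)
v = torch.randn(1, 8, 16, 64, dtype=torch.float16)
print(attention_with_upcast(q, k, v).dtype)  # torch.float16
```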