author | AUTOMATIC <16777216c@gmail.com> | 2022-10-14 20:03:41 +0300
---|---|---
committer | AUTOMATIC <16777216c@gmail.com> | 2022-10-14 20:03:41 +0300
commit | bb295f54785ac36dc6aa6f7103a3431464440fc3 (patch) |
tree | 56c2ed61dd818754c7934d656a6753b0f8c38374 /modules/sd_models.py |
parent | 4a216ded433ded315106e2989c5ff7dec1c49304 (diff) |
rework the code for lowram a bit
Diffstat (limited to 'modules/sd_models.py')
-rw-r--r-- | modules/sd_models.py | 12
1 file changed, 2 insertions, 10 deletions
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 78a198b9..3a01c93d 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -134,11 +134,7 @@ def load_model_weights(model, checkpoint_info):
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
- if shared.cmd_opts.lowram:
- print("Load to VRAM if GPU is available (low RAM)")
- pl_sd = torch.load(checkpoint_file)
- else:
- pl_sd = torch.load(checkpoint_file, map_location="cpu")
+ pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
@@ -164,11 +160,7 @@ def load_model_weights(model, checkpoint_info):
if os.path.exists(vae_file):
print(f"Loading VAE weights from: {vae_file}")
- if shared.cmd_opts.lowram:
- print("Load to VRAM if GPU is available (low RAM)")
- vae_ckpt = torch.load(vae_file)
- else:
- vae_ckpt = torch.load(vae_file, map_location="cpu")
+ vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location)
vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
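The two removed if/else blocks were identical apart from the file being loaded, so both collapse into a single torch.load call once the map_location choice is computed in one place. Below is a minimal sketch of the presumed consolidation, assuming modules/shared.py derives weight_load_location from the --lowram flag (that file is outside this diff, which is limited to modules/sd_models.py; the cmd_opts stand-in and checkpoint path here are hypothetical, for illustration only):

```python
import torch

class cmd_opts:
    lowram = False  # stand-in for the parsed --lowram command-line flag

# Presumed definition in modules/shared.py (an assumption; not shown in this diff).
# map_location=None lets torch.load restore tensors to the devices recorded in
# the checkpoint (the GPU, if that is where they were saved), matching the
# removed lowram branch; "cpu" forces them into system RAM first, matching the
# removed else branch.
weight_load_location = None if cmd_opts.lowram else "cpu"

checkpoint_file = "model.ckpt"  # hypothetical path, for illustration only
pl_sd = torch.load(checkpoint_file, map_location=weight_load_location)
```

The net behavior should be unchanged, with the device policy now defined once: with --lowram, weights go wherever the checkpoint saved them (VRAM if a GPU is available); without it, they are staged in CPU RAM first.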