diff options
author | Ljzd-PRO <63289359+Ljzd-PRO@users.noreply.github.com> | 2022-10-13 02:07:49 +0800 |
---|---|---|
committer | AUTOMATIC1111 <16777216c@gmail.com> | 2022-10-14 19:57:23 +0300 |
commit | 4a216ded433ded315106e2989c5ff7dec1c49304 (patch) | |
tree | 1b476fe00e32cce388bd2f36ac9a893277aa4e20 /modules | |
parent | a8eeb2b7ad0c43ad60ac2ba8bd299b9cb265fdd3 (diff) |
load models to VRAM when using `--lowram` param
load models to VRAM instead of RAM (for machines that have more VRAM than RAM, such as the free Google Colab server)
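For context, the behaviour the patch relies on is that `torch.load(..., map_location="cpu")` forces every tensor into system RAM, while omitting `map_location` lets tensors deserialize back onto the device they were saved from (typically CUDA for Stable Diffusion checkpoints). A minimal standalone sketch of that branching, outside the webui codebase — the `lowram` parameter and checkpoint path below are illustrative stand-ins, not part of this commit:

```python
import torch


def load_checkpoint(checkpoint_file: str, lowram: bool = False) -> dict:
    """Mirror the --lowram branching added to load_model_weights()."""
    if lowram:
        # No map_location: tensors are restored onto the device they were
        # saved from, so a GPU-saved checkpoint lands directly in VRAM
        # instead of first filling up system RAM.
        return torch.load(checkpoint_file)
    # Default path: remap every tensor to CPU so the weights sit in RAM
    # until the model is explicitly moved to the GPU.
    return torch.load(checkpoint_file, map_location="cpu")


# Hypothetical usage; "model.ckpt" is a placeholder path.
# pl_sd = load_checkpoint("model.ckpt", lowram=True)
```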
Diffstat (limited to 'modules')
-rw-r--r-- | modules/sd_models.py | 15 |
1 file changed, 13 insertions, 2 deletions
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 0a55b4c3..78a198b9 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -134,7 +134,12 @@ def load_model_weights(model, checkpoint_info):
     print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
-    pl_sd = torch.load(checkpoint_file, map_location="cpu")
+    if shared.cmd_opts.lowram:
+        print("Load to VRAM if GPU is available (low RAM)")
+        pl_sd = torch.load(checkpoint_file)
+    else:
+        pl_sd = torch.load(checkpoint_file, map_location="cpu")
+
     if "global_step" in pl_sd:
         print(f"Global Step: {pl_sd['global_step']}")
@@ -158,7 +163,13 @@ def load_model_weights(model, checkpoint_info):
     if os.path.exists(vae_file):
         print(f"Loading VAE weights from: {vae_file}")
-        vae_ckpt = torch.load(vae_file, map_location="cpu")
+
+        if shared.cmd_opts.lowram:
+            print("Load to VRAM if GPU is available (low RAM)")
+            vae_ckpt = torch.load(vae_file)
+        else:
+            vae_ckpt = torch.load(vae_file, map_location="cpu")
+
         vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
         model.first_stage_model.load_state_dict(vae_dict)
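The patch reads `shared.cmd_opts.lowram`, so the flag itself has to be registered on the webui's command-line parser; that change is not part of this diff. A plausible definition, assuming the argparse setup used in modules/shared.py — the help text below is illustrative, not quoted from the repository:

```python
import argparse

parser = argparse.ArgumentParser()
# Assumed flag definition: store_true makes --lowram default to False,
# matching the `if shared.cmd_opts.lowram:` checks in load_model_weights().
parser.add_argument(
    "--lowram",
    action="store_true",
    help="load checkpoint weights to VRAM instead of RAM "
         "(for hosts with more VRAM than RAM)",
)

cmd_opts = parser.parse_args([])   # e.g. parse_args(["--lowram"]) to enable
print(cmd_opts.lowram)             # False by default
```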