about summary refs log tree commit diff
path: root/modules/sd_models.py
diff options
context:
space:
mode:
authorTim Patton <38817597+pattontim@users.noreply.github.com>2022-11-21 16:40:18 -0500
committerTim Patton <38817597+pattontim@users.noreply.github.com>2022-11-21 16:40:18 -0500
commit210cb4c128afdd65fa998229a97d0694154983ea (patch)
tree727772959ad8e680c57264cdd6efa3015957846e /modules/sd_models.py
parente134b74ce95773789f69d158d23e93b7fe9295dc (diff)
Use GPU for loading safetensors, disable export
Diffstat (limited to 'modules/sd_models.py')
-rw-r--r--modules/sd_models.py5
1 file changed, 3 insertions, 2 deletions
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 2f8c2c48..2bbb3bf5 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -147,8 +147,9 @@ def torch_load(model_filename, model_info, map_override=None):
map_override=shared.weight_load_location if not map_override else map_override
if(checkpoint_types[model_info.exttype] == 'safetensors'):
# safely load weights
- # TODO: safetensors supports zero copy fast load to gpu, see issue #684
- return load_file(model_filename, device=map_override)
+ # TODO: safetensors supports zero copy fast load to gpu, see issue #684.
+ # GPU only for now, see https://github.com/huggingface/safetensors/issues/95
+ return load_file(model_filename, device='cuda')
else:
return torch.load(model_filename, map_location=map_override)