Workaround for lora OOM on lowvram mode.

This commit is contained in:
comfyanonymous 2024-08-07 14:30:54 -04:00
parent 1208863eca
commit cb7c4b4be3

View File

@ -348,8 +348,8 @@ class ModelPatcher:
m.comfy_cast_weights = True
else:
if hasattr(m, "weight"):
-                        self.patch_weight_to_device(weight_key, device_to)
-                        self.patch_weight_to_device(bias_key, device_to)
+                        self.patch_weight_to_device(weight_key) #TODO: speed this up without causing OOM
+                        self.patch_weight_to_device(bias_key)
m.to(device_to)
mem_counter += comfy.model_management.module_size(m)
logging.debug("lowvram: loaded module regularly {} {}".format(n, m))