diff --git a/comfy/model_management.py b/comfy/model_management.py
index f7374aa1..97ffc283 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -7,6 +7,8 @@
 NORMAL_VRAM = 3
 
 accelerate_enabled = False
 vram_state = NORMAL_VRAM
+total_vram_available_mb = -1
+
 import sys
 set_vram_to = NORMAL_VRAM
@@ -24,6 +26,13 @@ if set_vram_to != NORMAL_VRAM:
         import traceback
         print(traceback.format_exc())
         print("ERROR: COULD NOT ENABLE LOW VRAM MODE.")
+    try:
+        import torch
+        total_vram_available_mb = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
+    except:
+        pass
+    total_vram_available_mb = (total_vram_available_mb - 1024) // 2
+    total_vram_available_mb = int(max(256, total_vram_available_mb))
 
 print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM"][vram_state])
 
@@ -71,7 +80,7 @@ def load_model_gpu(model):
         if vram_state == NO_VRAM:
             device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"})
         elif vram_state == LOW_VRAM:
-            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "1GiB", "cpu": "16GiB"})
+            device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"})
         accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda")
         model_accelerated = True
     return current_loaded_model
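
For reference, a minimal standalone sketch of the low-VRAM budget heuristic this patch introduces: query the device's total memory with torch.cuda.mem_get_info, subtract roughly 1GiB, halve the remainder, and never drop below the 256MiB floor that NO_VRAM mode uses. The lowvram_budget_mb helper name is hypothetical; the patch inlines this logic at module scope, and only the arithmetic comes from the diff above.

import torch

def lowvram_budget_mb():
    # Hypothetical helper wrapping the patch's module-level computation.
    total_mb = -1
    try:
        # mem_get_info returns (free_bytes, total_bytes); the patch uses total.
        total_mb = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
    except Exception:
        pass  # no usable CUDA device; fall through to the 256MiB floor
    # Subtract ~1GiB, then halve: the patch's heuristic for the weight budget.
    budget = (total_mb - 1024) // 2
    return int(max(256, budget))

# Usage, mirroring how the patch feeds the budget to accelerate:
# device_map = accelerate.infer_auto_device_map(
#     real_model,
#     max_memory={0: "{}MiB".format(lowvram_budget_mb()), "cpu": "16GiB"})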