From 037c38eb0fff2b18344faec3323c2703eadf2ec7 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 8 Aug 2024 17:28:35 -0400
Subject: [PATCH] Try to improve inference speed on some machines.

---
 comfy/model_management.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index b7aff9f5..7fbb4282 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -432,11 +432,11 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
     global vram_state
 
     inference_memory = minimum_inference_memory()
-    extra_mem = max(inference_memory, memory_required)
+    extra_mem = max(inference_memory, memory_required) + 100 * 1024 * 1024
     if minimum_memory_required is None:
         minimum_memory_required = extra_mem
     else:
-        minimum_memory_required = max(inference_memory, minimum_memory_required)
+        minimum_memory_required = max(inference_memory, minimum_memory_required) + 100 * 1024 * 1024
 
     models = set(models)
 
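Below is a minimal standalone sketch (not part of the patch) of what the change does: on top of the larger of the two memory estimates, a flat 100 MiB (100 * 1024 * 1024 bytes) headroom is now reserved before models are loaded. The function names and figures here are hypothetical stand-ins for illustration, not ComfyUI's API.

# Standalone illustration of the patched arithmetic; all numbers are
# hypothetical, and MiB is a local helper constant, not ComfyUI code.
MiB = 1024 * 1024

def extra_mem_old(inference_memory, memory_required):
    # Before the patch: reserve the larger of the two estimates.
    return max(inference_memory, memory_required)

def extra_mem_new(inference_memory, memory_required):
    # After the patch: the same estimate plus a flat 100 MiB margin.
    return max(inference_memory, memory_required) + 100 * 1024 * 1024

inference_memory = 1024 * MiB  # stand-in for minimum_inference_memory()
memory_required = 1536 * MiB   # stand-in for the caller's estimate

print(extra_mem_old(inference_memory, memory_required) // MiB)  # 1536
print(extra_mem_new(inference_memory, memory_required) // MiB)  # 1636

Leaving this margin unused presumably makes it less likely that the final allocations during inference run right up against the VRAM limit and spill into slower system memory, which would explain the speedup the subject line describes on some machines.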