From 9e9c8a1c647698f872954757e22667b4c314f369 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 2 Jan 2025 08:44:16 -0500
Subject: [PATCH] Clear cache as often on AMD as Nvidia.

I think the issue this was working around has been solved. If you notice
that this change slows things down or causes stutters on your AMD GPU
with ROCm on Linux please report it.
---
 comfy/model_management.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 731fb584..15800a8e 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1121,9 +1121,8 @@ def soft_empty_cache(force=False):
     elif is_ascend_npu():
         torch.npu.empty_cache()
     elif torch.cuda.is_available():
-        if force or is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
-            torch.cuda.empty_cache()
-            torch.cuda.ipc_collect()
+        torch.cuda.empty_cache()
+        torch.cuda.ipc_collect()
 
 def unload_all_models():
     free_memory(1e30, get_torch_device())
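
For reference, below is a minimal sketch of how soft_empty_cache reads after this
patch, reduced to the branches visible in the hunk's context lines. The
is_ascend_npu stub is an assumption standing in for ComfyUI's real helper (which
lives elsewhere in comfy/model_management.py); the omitted MPS/XPU branches are
likewise not shown.

import torch

def is_ascend_npu():
    # Hypothetical stand-in for ComfyUI's helper: treat the Ascend backend as
    # present only when the torch_npu plugin has attached torch.npu.
    return hasattr(torch, "npu") and torch.npu.is_available()

def soft_empty_cache(force=False):
    # Sketch of the post-patch behaviour; branches outside the hunk are omitted.
    if is_ascend_npu():
        torch.npu.empty_cache()
    elif torch.cuda.is_available():
        # After this patch the CUDA/ROCm cache is flushed on every call;
        # the old `force or is_nvidia()` guard is gone.
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

Callers keep invoking soft_empty_cache() exactly as before; the only behavioural
difference is that ROCm devices now take the empty_cache/ipc_collect path on
every call rather than only when force is set.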