From 8773ccf74d189c21916f2a025df04f7a26a446a8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 13 Feb 2025 08:32:36 -0500 Subject: [PATCH] Better memory estimation for ROCm cards that support mem efficient attention. There is no way to check if the card actually supports it, so it is assumed to be supported if you use --use-pytorch-cross-attention with your card. --- comfy/model_management.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index f3d90c66..212ce9af 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -909,6 +909,8 @@ def pytorch_attention_flash_attention(): return True if is_ascend_npu(): return True + if is_amd(): + return True #if you have pytorch attention enabled on AMD it probably supports at least mem efficient attention return False def mac_version():