From 6e28a46454f2188844d60d54e5f3400a8ec0b81e Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 10 Jun 2025 10:06:24 -0700
Subject: [PATCH] Apple most likely is never fixing the fp16 attention bug.
 (#8485)

---
 comfy/model_management.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 28381561..05429143 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1052,7 +1052,7 @@ def pytorch_attention_flash_attention():
     global ENABLE_PYTORCH_ATTENTION
     if ENABLE_PYTORCH_ATTENTION:
         #TODO: more reliable way of checking for flash attention?
-        if is_nvidia(): #pytorch flash attention only works on Nvidia
+        if is_nvidia():
             return True
         if is_intel_xpu():
             return True
@@ -1068,7 +1068,7 @@ def force_upcast_attention_dtype():
     upcast = args.force_upcast_attention

     macos_version = mac_version()
-    if macos_version is not None and ((14, 5) <= macos_version < (16,)): # black image bug on recent versions of macOS
+    if macos_version is not None and ((14, 5) <= macos_version): # black image bug on recent versions of macOS, I don't think it's ever getting fixed
         upcast = True

     if upcast:
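
Note (not part of the patch): a minimal sketch of how the relaxed version check behaves, assuming mac_version() reports the macOS release as an integer tuple such as (15, 2), as the helper in comfy/model_management.py does. The old condition stopped forcing the fp32 upcast at macOS 16; the new one applies it to every release from 14.5 onward.

import platform

def mac_version():
    # Approximation of the helper used in comfy/model_management.py:
    # returns the macOS release as an integer tuple, or None off macOS.
    try:
        return tuple(int(n) for n in platform.mac_ver()[0].split("."))
    except Exception:
        return None

# Python compares tuples element by element, so the version checks read naturally:
# (14, 5) <= (15, 2) is True, while (16, 0) < (16,) is False.
for ver in [(14, 4), (14, 5), (15, 2), (16, 0)]:
    old_rule = (14, 5) <= ver < (16,)   # upper bound removed by this patch
    new_rule = (14, 5) <= ver           # upcast on 14.5 and anything newer
    print(ver, "old:", old_rule, "new:", new_rule)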