Apple most likely is never fixing the fp16 attention bug. (#8485)

comfyanonymous 2025-06-10 10:06:24 -07:00 committed by GitHub
parent c7b25784b1
commit 6e28a46454
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194


@@ -1052,7 +1052,7 @@ def pytorch_attention_flash_attention():
     global ENABLE_PYTORCH_ATTENTION
     if ENABLE_PYTORCH_ATTENTION:
         #TODO: more reliable way of checking for flash attention?
-        if is_nvidia(): #pytorch flash attention only works on Nvidia
+        if is_nvidia():
             return True
         if is_intel_xpu():
             return True
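
The removed comment was already contradicted by the is_intel_xpu() branch directly below it, which also returns True. For illustration only, hypothetical stand-ins for those platform checks (not the actual comfy.model_management helpers) might look like this, assuming a CUDA build exposes torch.version.cuda and Intel XPU support is exposed as torch.xpu:

import torch

def is_nvidia():
    # Assumption: a CUDA build of PyTorch reports a CUDA version string.
    return torch.cuda.is_available() and torch.version.cuda is not None

def is_intel_xpu():
    # Assumption: Intel XPU support shows up as torch.xpu in newer PyTorch.
    return hasattr(torch, "xpu") and torch.xpu.is_available()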
@@ -1068,7 +1068,7 @@ def force_upcast_attention_dtype():
     upcast = args.force_upcast_attention
 
     macos_version = mac_version()
-    if macos_version is not None and ((14, 5) <= macos_version < (16,)): # black image bug on recent versions of macOS
+    if macos_version is not None and ((14, 5) <= macos_version): # black image bug on recent versions of macOS, I don't think it's ever getting fixed
         upcast = True
 
     if upcast:
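
For context, the relaxed condition relies on Python's lexicographic comparison of version tuples. A minimal sketch of how the check behaves after this change, assuming a mac_version() helper that parses platform.mac_ver() into an integer tuple (roughly what ComfyUI's helper does):

import platform

def mac_version():
    # Assumption: returns a tuple such as (14, 5) on macOS, or None elsewhere.
    try:
        return tuple(int(n) for n in platform.mac_ver()[0].split("."))
    except (ValueError, IndexError):
        return None

def should_force_upcast_attention():
    macos_version = mac_version()
    # The old check capped the workaround at macOS 16: (14, 5) <= v < (16,).
    # The new check drops the upper bound, so fp16 attention is upcast to
    # fp32 on every macOS release from 14.5 onward.
    return macos_version is not None and (14, 5) <= macos_version

print(should_force_upcast_attention())  # True on macOS 14.5+, False elsewhere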