From 8508df25691b0c9213049ab0d723610d3d8f9136 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 21 May 2024 16:56:33 -0400
Subject: [PATCH] Work around black image bug on Mac 14.5 by forcing attention
 upcasting.

---
 comfy/ldm/modules/attention.py |  5 +++--
 comfy/model_management.py      | 13 +++++++++++++
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 2ce99d46..93c94458 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -19,12 +19,13 @@ from comfy.cli_args import args
 import comfy.ops
 ops = comfy.ops.disable_weight_init
 
+FORCE_UPCAST_ATTENTION_DTYPE = model_management.force_upcast_attention_dtype()
 
 def get_attn_precision(attn_precision):
     if args.dont_upcast_attention:
         return None
-    if attn_precision is None and args.force_upcast_attention:
-        return torch.float32
+    if FORCE_UPCAST_ATTENTION_DTYPE is not None:
+        return FORCE_UPCAST_ATTENTION_DTYPE
     return attn_precision
 
 def exists(val):
diff --git a/comfy/model_management.py b/comfy/model_management.py
index b2192862..fbfbb7e4 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -5,6 +5,7 @@ from comfy.cli_args import args
 import comfy.utils
 import torch
 import sys
+import platform
 
 class VRAMState(Enum):
     DISABLED = 0 #No vram present: no need to move models to vram
@@ -685,6 +686,18 @@ def pytorch_attention_flash_attention():
             return True
     return False
 
+def force_upcast_attention_dtype():
+    upcast = args.force_upcast_attention
+    try:
+        if platform.mac_ver()[0] in ['14.5']: #black image bug on OSX Sonoma 14.5
+            upcast = True
+    except:
+        pass
+    if upcast:
+        return torch.float32
+    else:
+        return None
+
 def get_free_memory(dev=None, torch_free_too=False):
     global directml_enabled
     if dev is None: