diff --git a/comfy/model_management.py b/comfy/model_management.py
index 3a4c93e3..1bb6156d 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -186,12 +186,21 @@ def get_total_memory(dev=None, torch_total_too=False):
     else:
         return mem_total
 
+def mac_version():
+    try:
+        return tuple(int(n) for n in platform.mac_ver()[0].split("."))
+    except:
+        return None
+
 total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
 total_ram = psutil.virtual_memory().total / (1024 * 1024)
 logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
 
 try:
     logging.info("pytorch version: {}".format(torch_version))
+    mac_ver = mac_version()
+    if mac_ver is not None:
+        print("Mac Version", mac_ver)
 except:
     pass
 
@@ -969,12 +978,6 @@ def pytorch_attention_flash_attention():
             return True #if you have pytorch attention enabled on AMD it probably supports at least mem efficient attention
     return False
 
-def mac_version():
-    try:
-        return tuple(int(n) for n in platform.mac_ver()[0].split("."))
-    except:
-        return None
-
 def force_upcast_attention_dtype():
     upcast = args.force_upcast_attention
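
Reviewer note (not part of the patch): a minimal standalone sketch of what the moved mac_version() helper returns, assuming only the standard-library platform module. On non-macOS systems platform.mac_ver()[0] is an empty string, so int("") raises ValueError and the bare except falls through to None; the startup logging block above therefore only prints on Macs.

    # Standalone sketch of mac_version() behavior (assumption: same body as the patch).
    import platform

    def mac_version():
        try:
            # On macOS 14.2.1 this yields (14, 2, 1).
            return tuple(int(n) for n in platform.mac_ver()[0].split("."))
        except:
            # On Linux/Windows mac_ver()[0] is "", int("") raises, so we return None.
            return None

    print(mac_version())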