From 66e28ef45c02437c1ca6a31afbe5f399eda15256 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 4 Feb 2024 20:53:35 -0500
Subject: [PATCH] Don't use is_bf16_supported to check for fp16 support.

---
 comfy/model_management.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index aa40c502..a8dc91b9 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -722,10 +722,13 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True, ma
     if is_intel_xpu():
         return True
 
-    if torch.cuda.is_bf16_supported():
+    if torch.version.hip:
         return True
 
     props = torch.cuda.get_device_properties("cuda")
+    if props.major >= 8:
+        return True
+
     if props.major < 6:
         return False
 
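Notes (not part of the commit): the patch stops inferring fp16 support from
bf16 support and instead checks directly for a ROCm build (torch.version.hip)
or an Ampere-or-newer GPU (compute capability major >= 8). Below is a minimal
sketch of the resulting decision logic, assuming a standard PyTorch install
with a CUDA device available. The function name fp16_check_sketch is invented
for illustration; the real logic lives in should_use_fp16, which carries
additional per-architecture and model-size handling omitted here.

    import torch

    def fp16_check_sketch():
        # ROCm (AMD) builds of PyTorch expose a HIP version string, which is
        # truthy; CUDA builds report None, so this only triggers on ROCm.
        if torch.version.hip:
            return True
        props = torch.cuda.get_device_properties("cuda")
        # Ampere (compute capability 8.x) and newer: fp16 is fully supported,
        # so there is no need to consult bf16 support at all.
        if props.major >= 8:
            return True
        # Pre-Pascal (compute capability < 6.0): no practical fp16 support.
        if props.major < 6:
            return False
        # Pascal through Turing fall through to further checks in the real
        # function; returning True here is an assumption of this sketch.
        return True

The key fix: torch.cuda.is_bf16_supported() answers a different question
(bf16 availability, which requires Ampere or newer on NVIDIA), so using it
as a proxy misclassified fp16-capable pre-Ampere GPUs.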