From eec449ca8e4b3741032f7fed9372ba52040eb563 Mon Sep 17 00:00:00 2001
From: Simon Lui <502929+simonlui@users.noreply.github.com>
Date: Fri, 22 Sep 2023 21:11:27 -0700
Subject: [PATCH] Allow Intel GPUs to LoRA cast on GPU since it supports BF16
 natively.

---
 comfy/model_management.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 1050c13a..8b896372 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -488,6 +488,8 @@ def cast_to_device(tensor, device, dtype, copy=False):
     elif tensor.dtype == torch.bfloat16:
         if hasattr(device, 'type') and device.type.startswith("cuda"):
             device_supports_cast = True
+        elif is_intel_xpu():
+            device_supports_cast = True
 
     if device_supports_cast:
         if copy: