Don't try to get vram from xpu or cuda when directml is enabled.

This commit is contained in:
comfyanonymous 2023-04-29 00:28:48 -04:00
parent 2ca934f7d4
commit 056e5545ff

View File

@@ -34,13 +34,16 @@ if args.directml is not None:
 try:
     import torch
-    try:
-        import intel_extension_for_pytorch as ipex
-        if torch.xpu.is_available():
-            xpu_available = True
-            total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024)
-    except:
-        total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
+    if directml_enabled:
+        total_vram = 4097 #TODO
+    else:
+        try:
+            import intel_extension_for_pytorch as ipex
+            if torch.xpu.is_available():
+                xpu_available = True
+                total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024)
+        except:
+            total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
     total_ram = psutil.virtual_memory().total / (1024 * 1024)
     if not args.normalvram and not args.cpu:
         if total_vram <= 4096: