Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-01-11 02:15:17 +00:00)
Fix OSX latent2rgb previews.
commit 6c23854f54
parent 7718ada4ed
@@ -630,8 +630,14 @@ def supports_dtype(device, dtype): #TODO
 def device_supports_non_blocking(device):
     if is_device_mps(device):
         return False #pytorch bug? mps doesn't support non blocking
-    return False
-    # return True #TODO: figure out why this causes issues
+    return True
+
+def device_should_use_non_blocking(device):
+    if not device_supports_non_blocking(device):
+        return False
+    return False
+    # return True #TODO: figure out why this causes memory issues on Nvidia and possibly others
+
 
 def cast_to_device(tensor, device, dtype, copy=False):
     device_supports_cast = False
@@ -643,7 +649,7 @@ def cast_to_device(tensor, device, dtype, copy=False):
     elif is_intel_xpu():
         device_supports_cast = True
 
-    non_blocking = device_supports_non_blocking(device)
+    non_blocking = device_should_use_non_blocking(device)
 
     if device_supports_cast:
         if copy:
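The two hunks above are in comfy/model_management.py (the cast_bias_weight hunk below calls these functions through the comfy.model_management module). They split one question into two: device_supports_non_blocking now reports whether a device can handle non-blocking transfers at all (MPS cannot, per the in-code comment), while the new device_should_use_non_blocking decides whether ComfyUI should actually request them, and it still answers False because of the memory-issue TODO. The sketch below shows how a caller might combine the two; it is a minimal standalone illustration, and the move_to_device helper and the simplified device checks are assumptions of this sketch rather than ComfyUI code.

    import torch

    def device_supports_non_blocking(device: torch.device) -> bool:
        # Capability check: MPS is excluded because async copies misbehave there
        # (the diff's comment calls this a possible PyTorch bug).
        if device.type == "mps":
            return False
        return True

    def device_should_use_non_blocking(device: torch.device) -> bool:
        # Policy check: even capable devices keep blocking transfers for now,
        # mirroring the TODO about memory issues on Nvidia in the hunk above.
        if not device_supports_non_blocking(device):
            return False
        return False

    def move_to_device(tensor: torch.Tensor, device: torch.device) -> torch.Tensor:
        # Hypothetical caller: only request an async copy when the policy allows it.
        return tensor.to(device, non_blocking=device_should_use_non_blocking(device))

    if __name__ == "__main__":
        x = torch.randn(4, 4)
        dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(move_to_device(x, dev).device)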
@@ -21,7 +21,7 @@ import comfy.model_management
 
 def cast_bias_weight(s, input):
     bias = None
-    non_blocking = comfy.model_management.device_supports_non_blocking(input.device)
+    non_blocking = comfy.model_management.device_should_use_non_blocking(input.device)
     if s.bias is not None:
         bias = s.bias.to(device=input.device, dtype=input.dtype, non_blocking=non_blocking)
         if s.bias_function is not None:
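The hunk above changes cast_bias_weight (comfy/ops.py in the upstream repo) so that layer parameters are only copied asynchronously when the policy function allows it. Below is a minimal, self-contained sketch of that casting pattern; the _should_use_non_blocking stub and the cast_bias_weight_sketch name are stand-ins for this illustration, and the real cast_bias_weight also applies an optional bias_function, visible at the end of the hunk but not reproduced here.

    import torch

    def _should_use_non_blocking(device: torch.device) -> bool:
        # Stand-in for comfy.model_management.device_should_use_non_blocking,
        # which currently always returns False (see the earlier hunk).
        return False

    def cast_bias_weight_sketch(layer: torch.nn.Linear, input: torch.Tensor):
        # Illustrative version of the pattern in cast_bias_weight: move the layer's
        # parameters onto the input's device/dtype, letting the policy function
        # decide whether the copies may be asynchronous.
        non_blocking = _should_use_non_blocking(input.device)
        bias = None
        if layer.bias is not None:
            bias = layer.bias.to(device=input.device, dtype=input.dtype, non_blocking=non_blocking)
        weight = layer.weight.to(device=input.device, dtype=input.dtype, non_blocking=non_blocking)
        return weight, bias

    if __name__ == "__main__":
        lin = torch.nn.Linear(8, 8)
        x = torch.randn(2, 8, dtype=torch.float32)
        w, b = cast_bias_weight_sketch(lin, x)
        print(w.dtype, None if b is None else b.dtype)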
@@ -4,6 +4,7 @@ import struct
 import numpy as np
 from comfy.cli_args import args, LatentPreviewMethod
 from comfy.taesd.taesd import TAESD
+import comfy.model_management
 import folder_paths
 import comfy.utils
 import logging
@@ -43,7 +44,7 @@ class Latent2RGBPreviewer(LatentPreviewer):
         latents_ubyte = (((latent_image + 1) / 2)
                          .clamp(0, 1)  # change scale from -1..1 to 0..1
                          .mul(0xFF)  # to 0..255
-                         ).to(device="cpu", dtype=torch.uint8, non_blocking=True)
+                         ).to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(latent_image.device))
 
         return Image.fromarray(latents_ubyte.numpy())
 
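The last two hunks touch the Latent2RGBPreviewer (latent_preview.py upstream): the preview code now imports comfy.model_management and asks device_supports_non_blocking instead of hardcoding non_blocking=True, so on MPS, where that check returns False, the latent-to-uint8 copy to the CPU is synchronous. That is presumably what fixes the OSX previews, since an asynchronous copy could let .numpy() read the buffer before the transfer finishes; the diff itself only hints at this through the "mps doesn't support non blocking" comment. Below is a minimal sketch of the same conversion, where the supports_non_blocking stub and the toy RGB-projected input tensor are assumptions of this sketch rather than ComfyUI code.

    import torch
    from PIL import Image

    def supports_non_blocking(device: torch.device) -> bool:
        # Stand-in for comfy.model_management.device_supports_non_blocking:
        # MPS is excluded because async copies there misbehaved (per the diff comment).
        return device.type != "mps"

    def latent_to_preview(latent_image: torch.Tensor) -> Image.Image:
        # Same conversion pipeline as the hunk: map -1..1 floats to 0..255 bytes on CPU.
        latents_ubyte = (((latent_image + 1) / 2)
                         .clamp(0, 1)   # change scale from -1..1 to 0..1
                         .mul(0xFF)     # to 0..255
                         ).to(device="cpu", dtype=torch.uint8,
                              non_blocking=supports_non_blocking(latent_image.device))
        return Image.fromarray(latents_ubyte.numpy())

    if __name__ == "__main__":
        # Toy stand-in for an already RGB-projected latent (H, W, 3) in -1..1.
        fake_rgb = torch.rand(64, 64, 3) * 2 - 1
        latent_to_preview(fake_rgb).save("preview_demo.png")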