Add a --force-channels-last flag to run model inference in channels-last mode.
commit 0ec513d877
parent 0e06b370db
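
For background (not part of the diff below): channels-last keeps a tensor's logical NCHW shape but stores the channel dimension innermost in memory, which can speed up convolution-heavy models on some backends. A minimal plain-PyTorch sketch of the layout change:

import torch

# Illustration only: channels-last changes the memory layout, not the logical shape.
x = torch.randn(1, 3, 64, 64)             # NCHW tensor
print(x.stride())                          # (12288, 4096, 64, 1) -- contiguous NCHW

x_cl = x.to(memory_format=torch.channels_last)
print(x_cl.shape)                          # torch.Size([1, 3, 64, 64]) -- unchanged
print(x_cl.stride())                       # (12288, 1, 192, 3) -- channels innermost
print(x_cl.is_contiguous(memory_format=torch.channels_last))  # True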
comfy/cli_args.py
@@ -75,6 +75,7 @@ fpte_group.add_argument("--fp8_e5m2-text-enc", action="store_true", help="Store
 fpte_group.add_argument("--fp16-text-enc", action="store_true", help="Store text encoder weights in fp16.")
 fpte_group.add_argument("--fp32-text-enc", action="store_true", help="Store text encoder weights in fp32.")
 
+parser.add_argument("--force-channels-last", action="store_true", help="Force channels last format when inferencing the models.")
 
 parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.")
 
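
The new option is an ordinary argparse store_true flag: it defaults to False and becomes True only when passed on the command line, after which it is readable as args.force_channels_last. A small sketch with a throwaway parser (not ComfyUI's actual parser object):

import argparse

# Throwaway parser for illustration; the real flag is registered on ComfyUI's
# shared parser in comfy/cli_args.py and exposed as args.force_channels_last.
p = argparse.ArgumentParser()
p.add_argument("--force-channels-last", action="store_true",
               help="Force channels last format when inferencing the models.")

print(p.parse_args([]).force_channels_last)                          # False
print(p.parse_args(["--force-channels-last"]).force_channels_last)   # True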
comfy/model_base.py
@@ -66,6 +66,9 @@ class BaseModel(torch.nn.Module):
             else:
                 operations = comfy.ops.disable_weight_init
             self.diffusion_model = unet_model(**unet_config, device=device, operations=operations)
+            if comfy.model_management.force_channels_last():
+                self.diffusion_model.to(memory_format=torch.channels_last)
+                logging.debug("using channels last mode for diffusion model")
         self.model_type = model_type
         self.model_sampling = model_sampling(model_config, model_type)
 
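
The conversion itself is a single .to(memory_format=...) call on the module, which rewrites 4-D parameters such as conv kernels into NHWC order in place. A minimal sketch with a stand-in gate instead of comfy.model_management.force_channels_last():

import torch

def force_channels_last_enabled():
    # Stand-in for comfy.model_management.force_channels_last(); always on here.
    return True

model = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)
if force_channels_last_enabled():
    model.to(memory_format=torch.channels_last)   # converts the 4-D conv weights in place

x = torch.randn(1, 3, 32, 32).to(memory_format=torch.channels_last)
y = model(x)
print(model.weight.is_contiguous(memory_format=torch.channels_last))  # True
print(y.is_contiguous(memory_format=torch.channels_last))             # True

In this sketch the input is converted as well, and the convolution output comes back in channels-last format too.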
comfy/model_management.py
@@ -673,6 +673,12 @@ def device_should_use_non_blocking(device):
     return False
     # return True #TODO: figure out why this causes memory issues on Nvidia and possibly others
 
+def force_channels_last():
+    if args.force_channels_last:
+        return True
+
+    #TODO
+    return False
 
 def cast_to_device(tensor, device, dtype, copy=False):
     device_supports_cast = False