Add a --force-channels-last argument to run inference models in channels-last memory format.

This commit is contained in:
comfyanonymous 2024-06-15 01:08:12 -04:00
parent 0e06b370db
commit 0ec513d877
3 changed files with 10 additions and 0 deletions

View File

@ -75,6 +75,7 @@ fpte_group.add_argument("--fp8_e5m2-text-enc", action="store_true", help="Store
# Mutually-exclusive precision overrides for storing text encoder weights.
fpte_group.add_argument("--fp16-text-enc", action="store_true", help="Store text encoder weights in fp16.")
fpte_group.add_argument("--fp32-text-enc", action="store_true", help="Store text encoder weights in fp32.")
# New flag added by this commit; read via comfy.model_management.force_channels_last()
# to switch diffusion models to torch.channels_last memory format at load time.
parser.add_argument("--force-channels-last", action="store_true", help="Force channels last format when inferencing the models.")
# const=-1 means "--directml" with no value selects the default DirectML device.
parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.")

View File

@ -66,6 +66,9 @@ class BaseModel(torch.nn.Module):
else:
operations = comfy.ops.disable_weight_init
self.diffusion_model = unet_model(**unet_config, device=device, operations=operations)
if comfy.model_management.force_channels_last():
self.diffusion_model.to(memory_format=torch.channels_last)
logging.debug("using channels last mode for diffusion model")
self.model_type = model_type
self.model_sampling = model_sampling(model_config, model_type)

View File

@ -673,6 +673,12 @@ def device_should_use_non_blocking(device):
return False
# return True #TODO: figure out why this causes memory issues on Nvidia and possibly others
def force_channels_last():
    """Return True when models should be converted to channels-last memory format.

    Currently this is driven solely by the --force-channels-last CLI flag.
    """
    # TODO: also enable automatically on hardware where channels-last is a win.
    return bool(args.force_channels_last)
def cast_to_device(tensor, device, dtype, copy=False):
device_supports_cast = False