mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2025-01-25 15:55:18 +00:00
Add a command line argument to enable backend:cudaMallocAsync
This commit is contained in:
parent
3a150bad15
commit
1679abd86d
@@ -40,6 +40,7 @@ parser.add_argument("--extra-model-paths-config", type=str, default=None, metava
|
|||||||
parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
|
parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
|
||||||
parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.")
|
parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.")
|
||||||
parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
|
parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
|
||||||
|
parser.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync.")
|
||||||
parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.")
|
parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.")
|
||||||
|
|
||||||
fp_group = parser.add_mutually_exclusive_group()
|
fp_group = parser.add_mutually_exclusive_group()
|
||||||
|
@@ -204,7 +204,7 @@ print(f"Set vram state to: {vram_state.name}")
|
|||||||
def get_torch_device_name(device):
|
def get_torch_device_name(device):
|
||||||
if hasattr(device, 'type'):
|
if hasattr(device, 'type'):
|
||||||
if device.type == "cuda":
|
if device.type == "cuda":
|
||||||
return "{} {}".format(device, torch.cuda.get_device_name(device))
|
return "{} {} : {}".format(device, torch.cuda.get_device_name(device), torch.cuda.get_allocator_backend())
|
||||||
else:
|
else:
|
||||||
return "{}".format(device.type)
|
return "{}".format(device.type)
|
||||||
else:
|
else:
|
||||||
|
10
main.py
10
main.py
@@ -51,7 +51,6 @@ import threading
|
|||||||
import gc
|
import gc
|
||||||
|
|
||||||
from comfy.cli_args import args
|
from comfy.cli_args import args
|
||||||
import comfy.utils
|
|
||||||
|
|
||||||
if os.name == "nt":
|
if os.name == "nt":
|
||||||
import logging
|
import logging
|
||||||
@@ -62,7 +61,16 @@ if __name__ == "__main__":
|
|||||||
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
|
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
|
||||||
print("Set cuda device to:", args.cuda_device)
|
print("Set cuda device to:", args.cuda_device)
|
||||||
|
|
||||||
|
if args.cuda_malloc:
|
||||||
|
env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', None)
|
||||||
|
if env_var is None:
|
||||||
|
env_var = "backend:cudaMallocAsync"
|
||||||
|
else:
|
||||||
|
env_var += ",backend:cudaMallocAsync"
|
||||||
|
|
||||||
|
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = env_var
|
||||||
|
|
||||||
|
import comfy.utils
|
||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
import execution
|
import execution
|
||||||
|
Loading…
Reference in New Issue
Block a user