Compare commits

...

3 Commits

Author            SHA1        Message                                                 Date
Alexander Piskun  5f61144bc6  Merge 1f6ab7dbfb into 2307ff6746                        2025-01-08 19:17:00 -05:00
comfyanonymous    2307ff6746  Improve some logging messages.                          2025-01-08 19:05:22 -05:00
bigcat88          1f6ab7dbfb  support for "unload_models" flag when creating a task   2024-12-24 17:41:49 +02:00
                              (Signed-off-by: bigcat88 <bigcat88@icloud.com>)
3 changed files with 12 additions and 3 deletions

View File

@@ -111,7 +111,7 @@ class CLIP:
             model_management.load_models_gpu([self.patcher], force_full_load=True)
         self.layer_idx = None
         self.use_clip_schedule = False
-        logging.info("CLIP model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype))
+        logging.info("CLIP/text encoder model load device: {}, offload device: {}, current: {}, dtype: {}".format(load_device, offload_device, params['device'], dtype))

     def clone(self):
         n = CLIP(no_init=True)
@@ -898,7 +898,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c
     if output_model:
         model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device())
         if inital_load_device != torch.device("cpu"):
-            logging.info("loaded straight to GPU")
+            logging.info("loaded diffusion model directly to GPU")
             model_management.load_models_gpu([model_patcher], force_full_load=True)

     return (model_patcher, clip, vae, clipvision)
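For illustration only, the reworded CLIP/text encoder message would render along these lines at load time; the device and dtype values below are assumed, not taken from this PR:

    import logging
    import torch

    logging.basicConfig(level=logging.INFO)

    # Assumed, illustrative values for a typical single-GPU setup.
    load_device = torch.device("cuda:0")
    offload_device = torch.device("cpu")
    current_device = torch.device("cpu")
    dtype = torch.float16

    # Same format string as the updated logging call above.
    logging.info("CLIP/text encoder model load device: {}, offload device: {}, current: {}, dtype: {}".format(
        load_device, offload_device, current_device, dtype))
    # Prints: INFO:root:CLIP/text encoder model load device: cuda:0, offload device: cpu, current: cpu, dtype: torch.float16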

View File

@@ -166,6 +166,14 @@ def prompt_worker(q, server_instance):
         queue_item = q.get(timeout=timeout)
         if queue_item is not None:
             item, item_id = queue_item
+            if item[3].get("unload_models"):
+                # For those cases where the flag is set, to clear memory before execution
+                comfy.model_management.unload_all_models()
+                gc.collect()
+                comfy.model_management.soft_empty_cache()
+                last_gc_collect = time.perf_counter()
             execution_start_time = time.perf_counter()
             prompt_id = item[1]
             server_instance.last_prompt_id = prompt_id
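The hunk above only covers the worker side of the new "unload_models" flag. Assuming the flag travels in the request's extra_data dict (which is what item[3] holds in the queue tuple in upstream ComfyUI), a minimal client sketch for exercising it could look like the following; the endpoint, port, and payload shape are assumptions, not part of this diff:

    import json
    import urllib.request

    # A real ComfyUI workflow graph (API format) would go here.
    workflow = {}

    payload = {
        "prompt": workflow,
        # Ask the worker to unload all models and free memory before executing.
        "extra_data": {"unload_models": True},
    }

    req = urllib.request.Request(
        "http://127.0.0.1:8188/prompt",
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        print(resp.read().decode("utf-8"))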

View File

@@ -4,7 +4,8 @@ lint.ignore = ["ALL"]
 # Enable specific rules
 lint.select = [
   "S307", # suspicious-eval-usage
-  "T201", # print-usage
+  "S102", # exec
+  "T", # print-usage
   "W",
   # The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names.
   # See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
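For context, "S102" flags calls to the exec builtin, and widening "T201" to the whole "T" family additionally covers pprint usage (T203). A short example of code each newly selected rule would flag, purely for illustration:

    # Illustrative only: lines Ruff would report under the selected rules.

    exec("result = 40 + 2")        # S102: use of exec detected

    print("debugging output")      # T201: print found (covered by "T")

    from pprint import pprint
    pprint({"debug": True})        # T203: pprint found (also covered by "T")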