From 31b6852f1963d8c85bbf5a9ad0228217faf9753c Mon Sep 17 00:00:00 2001
From: rickard
Date: Mon, 23 Dec 2024 08:19:34 +0100
Subject: [PATCH 1/3] load ltx loras trained with finetrainers

---
 comfy/lora.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/comfy/lora.py b/comfy/lora.py
index ec3da6f4..ccd52a66 100644
--- a/comfy/lora.py
+++ b/comfy/lora.py
@@ -64,6 +64,7 @@ def load_lora(lora, to_load, log_missing=True):
         diffusers3_lora = "{}.lora.up.weight".format(x)
         mochi_lora = "{}.lora_B".format(x)
         transformers_lora = "{}.lora_linear_layer.up.weight".format(x)
+        ltx_lora = "transformer.{}.lora_B.weight".format(x)
         A_name = None
 
         if regular_lora in lora.keys():
@@ -90,6 +91,10 @@
             A_name = transformers_lora
             B_name ="{}.lora_linear_layer.down.weight".format(x)
             mid_name = None
+        elif ltx_lora in lora.keys():
+            A_name = ltx_lora
+            B_name = "transformer.{}.lora_A.weight".format(x)
+            mid_name = None
 
         if A_name is not None:
             mid = None

From 21f20638bda1360328a30eeee69d0fdb02430923 Mon Sep 17 00:00:00 2001
From: rickard
Date: Mon, 23 Dec 2024 09:20:22 +0100
Subject: [PATCH 2/3] move code to model_lora_keys_unet

---
 comfy/lora.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/comfy/lora.py b/comfy/lora.py
index ccd52a66..d6ad5fb8 100644
--- a/comfy/lora.py
+++ b/comfy/lora.py
@@ -64,7 +64,6 @@ def load_lora(lora, to_load, log_missing=True):
         diffusers3_lora = "{}.lora.up.weight".format(x)
         mochi_lora = "{}.lora_B".format(x)
         transformers_lora = "{}.lora_linear_layer.up.weight".format(x)
-        ltx_lora = "transformer.{}.lora_B.weight".format(x)
         A_name = None
 
         if regular_lora in lora.keys():
@@ -91,10 +90,6 @@
             A_name = transformers_lora
             B_name ="{}.lora_linear_layer.down.weight".format(x)
             mid_name = None
-        elif ltx_lora in lora.keys():
-            A_name = ltx_lora
-            B_name = "transformer.{}.lora_A.weight".format(x)
-            mid_name = None
 
         if A_name is not None:
             mid = None
@@ -404,6 +399,12 @@
                 key_map["transformer.{}".format(key_lora)] = k
                 key_map["diffusion_model.{}".format(key_lora)] = k # Old loras
 
+    if isinstance(model, comfy.model_base.LTXV):
+        for k in sdk:
+            if k.startswith("transformer.") and k.endswith(".weight"): #Official Mochi lora format
+                key_lora = k[len("transformer."):-len(".weight")]
+                key_map["{}".format(key_lora)] = k
+
     return key_map
 
 

From ceee378d86b6f40a4856fb3e7c294f9301bc0e09 Mon Sep 17 00:00:00 2001
From: rickard
Date: Wed, 25 Dec 2024 18:04:56 +0100
Subject: [PATCH 3/3] replace 'diffusion_model' with 'transformer'

---
 comfy/lora.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/comfy/lora.py b/comfy/lora.py
index d6ad5fb8..e3ddb904 100644
--- a/comfy/lora.py
+++ b/comfy/lora.py
@@ -401,9 +401,9 @@
 
     if isinstance(model, comfy.model_base.LTXV):
         for k in sdk:
-            if k.startswith("transformer.") and k.endswith(".weight"): #Official Mochi lora format
-                key_lora = k[len("transformer."):-len(".weight")]
-                key_map["{}".format(key_lora)] = k
+            if k.startswith("diffusion_model.") and k.endswith(".weight"):
+                key_lora = k[len("diffusion_model."):-len(".weight")]
+                key_map["transformer.{}".format(key_lora)] = k
 
     return key_map
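
A minimal sketch of the effect of PATCH 3/3, written as a standalone re-implementation of the added loop (the layer name used below is a hypothetical example, not taken from the patches): for an LTXV model, every "diffusion_model.*.weight" key is aliased under a "transformer." prefix, which is the prefix finetrainers writes into its LTX lora checkpoints, so load_lora's existing "{}.lora_A.weight" / "{}.lora_B.weight" lookup can pick up the weights without the special branch that PATCH 2/3 removed.

# Sketch only; mirrors the loop added to model_lora_keys_unet() in PATCH 3/3.
def build_ltx_key_map(model_state_dict_keys):
    # For every "diffusion_model.*.weight" key in the model, register a
    # "transformer.*"-prefixed alias (the naming used by finetrainers LTX loras).
    key_map = {}
    for k in model_state_dict_keys:
        if k.startswith("diffusion_model.") and k.endswith(".weight"):
            key_lora = k[len("diffusion_model."):-len(".weight")]
            key_map["transformer.{}".format(key_lora)] = k
    return key_map

# Hypothetical LTXV model key; the matching lora file would carry tensors named
# "transformer.transformer_blocks.0.attn1.to_q.lora_A.weight" and ".lora_B.weight",
# which load_lora finds by appending those suffixes to each key_map entry.
model_keys = ["diffusion_model.transformer_blocks.0.attn1.to_q.weight"]
key_map = build_ltx_key_map(model_keys)
assert key_map["transformer.transformer_blocks.0.attn1.to_q"] == \
    "diffusion_model.transformer_blocks.0.attn1.to_q.weight"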