Mirror of https://github.com/comfyanonymous/ComfyUI.git
Add a node to set CLIP skip.
Use a simpler way to detect if the model is v prediction.
commit 4215206281
parent fed315a76a
@@ -81,7 +81,7 @@ class DDPM(torch.nn.Module):
         super().__init__()
         assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
         self.parameterization = parameterization
-        # print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
+        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
         self.cond_stage_model = None
         self.clip_denoised = clip_denoised
         self.log_every_t = log_every_t
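
For context, the three parameterizations name what the network is trained to predict for a noisy sample x_t = alpha_t * x0 + sigma_t * eps. A minimal illustrative sketch, not ComfyUI code; the alpha/sigma notation is the standard DDPM one:

def training_target(parameterization, x0, eps, alpha_t, sigma_t):
    # "eps": predict the noise that was added (classic DDPM objective)
    if parameterization == "eps":
        return eps
    # "x0": predict the clean sample directly
    if parameterization == "x0":
        return x0
    # "v": v-prediction (Salimans & Ho, "Progressive Distillation for
    # Fast Sampling of Diffusion Models", 2022)
    if parameterization == "v":
        return alpha_t * eps - sigma_t * x0
    raise ValueError(f"unknown parameterization: {parameterization}")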
comfy/sd.py (17 changed lines)
@@ -266,6 +266,7 @@ class CLIP:
         self.cond_stage_model = clip(**(params))
         self.tokenizer = tokenizer(embedding_directory=embedding_directory)
         self.patcher = ModelPatcher(self.cond_stage_model)
+        self.layer_idx = -1

     def clone(self):
         n = CLIP(no_init=True)
@@ -273,6 +274,7 @@ class CLIP:
         n.patcher = self.patcher.clone()
         n.cond_stage_model = self.cond_stage_model
         n.tokenizer = self.tokenizer
+        n.layer_idx = self.layer_idx
         return n

     def load_from_state_dict(self, sd):
@@ -282,9 +284,10 @@ class CLIP:
         return self.patcher.add_patches(patches, strength)

     def clip_layer(self, layer_idx):
-        return self.cond_stage_model.clip_layer(layer_idx)
+        self.layer_idx = layer_idx

     def encode(self, text):
+        self.cond_stage_model.clip_layer(self.layer_idx)
         tokens = self.tokenizer.tokenize_with_weights(text)
         try:
             self.patcher.patch_model()
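
The effect of this change is that clip_layer() no longer touches the underlying model immediately; the index is stored on the wrapper and pushed down to cond_stage_model only when encode() runs, so each clone() carries its own independent clip-skip setting. A standalone sketch of the pattern, with hypothetical names rather than the actual comfy.sd classes:

class LazyClipSkip:
    """Record the layer index now, apply it only when encoding."""
    def __init__(self, backend):
        self.backend = backend  # anything exposing clip_layer() and encode()
        self.layer_idx = -1     # -1 means: use the final CLIP layer

    def clone(self):
        n = LazyClipSkip(self.backend)
        n.layer_idx = self.layer_idx  # each clone keeps its own setting
        return n

    def clip_layer(self, layer_idx):
        self.layer_idx = layer_idx    # deferred; nothing is patched yet

    def encode(self, text):
        self.backend.clip_layer(self.layer_idx)  # applied just in time
        return self.backend.encode(text)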
@@ -744,15 +747,13 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, e
         else:
             unet_config["num_heads"] = 8 #SD1.x

+    if unet_config["context_dim"] == 1024 and unet_config["in_channels"] == 4: #only SD2.x non inpainting models are v prediction
+        k = "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.bias"
+        out = sd[k]
+        if torch.std(out, unbiased=False) > 0.09: # not sure how well this will actually work. I guess we will find out.
+            sd_config["parameterization"] = 'v'
+
     model = instantiate_from_config(model_config)
     model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)

-    if unet_config["context_dim"] == 1024 and unet_config["in_channels"] == 4: #only SD2.x non inpainting models are v prediction
-        cond = torch.zeros((1, 2, unet_config["context_dim"]), device="cpu")
-        x_in = torch.rand((1, unet_config["in_channels"], 8, 8), device="cpu", generator=torch.manual_seed(1))
-        out = model.apply_model(x_in, torch.tensor([999], device="cpu"), cond)
-        if out.mean() < -0.6: #mean of eps should be ~0 and mean of v prediction should be ~-1
-            model.parameterization = 'v'
-
     return (ModelPatcher(model), clip, vae)
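
The removed detection had to instantiate the model and run a forward pass; the replacement only inspects one tensor in the checkpoint's state dict. A sketch of the same heuristic as a standalone check; the key and the 0.09 threshold are taken straight from the diff, and as the inline comment there admits, the threshold is an empirical guess:

import torch

V_PRED_KEY = "model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1.bias"

def looks_like_v_prediction(sd):
    # Intended for SD2.x (context_dim 1024) non-inpainting checkpoints only;
    # per the heuristic above, eps-prediction models appear to have a
    # lower-variance bias at this layer norm than v-prediction ones.
    bias = sd.get(V_PRED_KEY)
    return bias is not None and torch.std(bias, unbiased=False).item() > 0.09

# Hypothetical usage; checkpoint layout varies, and some files have no
# "state_dict" wrapper around the weights.
ckpt = torch.load("model.ckpt", map_location="cpu")
sd = ckpt.get("state_dict", ckpt)
print("v-prediction:", looks_like_v_prediction(sd))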
nodes.py (17 changed lines)
@@ -220,6 +220,22 @@ class CheckpointLoaderSimple:
         out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=CheckpointLoader.embedding_directory)
         return out

+class CLIPSetLastLayer:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "clip": ("CLIP", ),
+                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
+                              }}
+    RETURN_TYPES = ("CLIP",)
+    FUNCTION = "set_last_layer"
+
+    CATEGORY = "conditioning"
+
+    def set_last_layer(self, clip, stop_at_clip_layer):
+        clip = clip.clone()
+        clip.clip_layer(stop_at_clip_layer)
+        return (clip,)
+
 class LoraLoader:
     models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
     lora_dir = os.path.join(models_dir, "loras")
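
Outside the graph UI, the node boils down to a clone-then-set call, which is why the incoming CLIP object is left unmodified. A hypothetical direct invocation, assuming clip is an already loaded comfy.sd.CLIP instance:

node = CLIPSetLastLayer()
(clip_skipped,) = node.set_last_layer(clip, stop_at_clip_layer=-2)
# The original `clip` keeps layer_idx == -1; only the returned clone stops
# at the second-to-last CLIP layer ("clip skip 2" in other UIs).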
@@ -829,6 +845,7 @@ NODE_CLASS_MAPPINGS = {
     "KSampler": KSampler,
     "CheckpointLoader": CheckpointLoader,
     "CLIPTextEncode": CLIPTextEncode,
+    "CLIPSetLastLayer": CLIPSetLastLayer,
     "VAEDecode": VAEDecode,
     "VAEEncode": VAEEncode,
     "VAEEncodeForInpaint": VAEEncodeForInpaint,