Mirror of https://github.com/comfyanonymous/ComfyUI.git
commit 7ec1dd25a2
parent 47acb3d73e

    A tiny bit of reorganizing.
comfy_extras/clip_vision_config.json | 23 (new file)

@@ -0,0 +1,23 @@
+{
+  "_name_or_path": "openai/clip-vit-large-patch14",
+  "architectures": [
+    "CLIPVisionModel"
+  ],
+  "attention_dropout": 0.0,
+  "dropout": 0.0,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 1024,
+  "image_size": 224,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "model_type": "clip_vision_model",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 24,
+  "patch_size": 14,
+  "projection_dim": 768,
+  "torch_dtype": "float32",
+  "transformers_version": "4.24.0"
+}
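The file added above is the vision-tower configuration for openai/clip-vit-large-patch14. As an aside, here is a minimal sketch of what those fields describe, assuming the Hugging Face transformers package is installed (illustrative only; the commit does not show how ComfyUI itself consumes this file):

# Build a config object from the JSON added in this commit, then an
# untrained CLIP vision tower with that geometry; real checkpoint
# weights would be loaded separately.
from transformers import CLIPVisionConfig, CLIPVisionModel

config = CLIPVisionConfig.from_json_file("comfy_extras/clip_vision_config.json")
model = CLIPVisionModel(config)
print(config.hidden_size, config.num_hidden_layers)  # 1024 24, matching the JSON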
nodes.py | 24

@@ -395,10 +395,10 @@ class CLIPVisionEncode:
         return {"required": { "clip_vision": ("CLIP_VISION",),
                               "image": ("IMAGE",)
                              }}
-    RETURN_TYPES = ("CLIP_VISION_EMBED",)
+    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
     FUNCTION = "encode"

-    CATEGORY = "conditioning"
+    CATEGORY = "conditioning/style_model"

     def encode(self, clip_vision, image):
         output = clip_vision.encode_image(image)
@@ -425,16 +425,16 @@ class StyleModelLoader:
 class StyleModelApply:
     @classmethod
     def INPUT_TYPES(s):
-        return {"required": {"clip_vision_embed": ("CLIP_VISION_EMBED", ),
+        return {"required": {"clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                              "style_model": ("STYLE_MODEL", )
                             }}
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "apply_stylemodel"

-    CATEGORY = "conditioning"
+    CATEGORY = "conditioning/style_model"

-    def apply_stylemodel(self, clip_vision_embed, style_model):
-        c = style_model.get_cond(clip_vision_embed)
+    def apply_stylemodel(self, clip_vision_output, style_model):
+        c = style_model.get_cond(clip_vision_output)
         return ([[c, {}]], )

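The rename above means the value produced by CLIPVisionEncode.encode now travels on a CLIP_VISION_OUTPUT socket and is consumed by StyleModelApply under the matching parameter name. A rough sketch of that flow, assuming the usual ComfyUI convention that node functions return their results as one-element tuples (clip_vision, image and style_model are hypothetical placeholders for what the loader nodes would provide; this is not ComfyUI's executor code):

# Hypothetical direct calls showing the renamed data flow; in ComfyUI the
# executor wires these sockets from the workflow graph instead.
encode_node = CLIPVisionEncode()
apply_node = StyleModelApply()

(clip_vision_output,) = encode_node.encode(clip_vision, image)                  # CLIP_VISION_OUTPUT
(conditioning,) = apply_node.apply_stylemodel(clip_vision_output, style_model)  # CONDITIONING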
@@ -445,7 +445,7 @@ class ConditioningAppend:
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "append"

-    CATEGORY = "conditioning"
+    CATEGORY = "conditioning/style_model"

     def append(self, conditioning_to, conditioning_from):
         c = []
@@ -504,7 +504,7 @@ class LatentRotate:
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "rotate"

-    CATEGORY = "latent"
+    CATEGORY = "latent/transform"

     def rotate(self, samples, rotation):
         s = samples.copy()
@@ -528,7 +528,7 @@ class LatentFlip:
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "flip"

-    CATEGORY = "latent"
+    CATEGORY = "latent/transform"

     def flip(self, samples, flip_method):
         s = samples.copy()
@@ -593,7 +593,7 @@ class LatentCrop:
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "crop"

-    CATEGORY = "latent"
+    CATEGORY = "latent/transform"

     def crop(self, samples, width, height, x, y):
         s = samples.copy()
@@ -951,8 +951,6 @@ NODE_CLASS_MAPPINGS = {
     "LatentCrop": LatentCrop,
     "LoraLoader": LoraLoader,
     "CLIPLoader": CLIPLoader,
-    "StyleModelLoader": StyleModelLoader,
-    "CLIPVisionLoader": CLIPVisionLoader,
     "CLIPVisionEncode": CLIPVisionEncode,
     "StyleModelApply":StyleModelApply,
     "ConditioningAppend":ConditioningAppend,
@@ -960,6 +958,8 @@ NODE_CLASS_MAPPINGS = {
     "ControlNetLoader": ControlNetLoader,
     "DiffControlNetLoader": DiffControlNetLoader,
     "T2IAdapterLoader": T2IAdapterLoader,
+    "StyleModelLoader": StyleModelLoader,
+    "CLIPVisionLoader": CLIPVisionLoader,
     "VAEDecodeTiled": VAEDecodeTiled,
 }

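The last two hunks only move the StyleModelLoader and CLIPVisionLoader entries so the loader nodes sit together in NODE_CLASS_MAPPINGS; the registry itself behaves the same. A minimal sketch of how such a name-to-class registry is typically consumed (the classes here are hypothetical stand-ins, not the real implementations in nodes.py):

# Hypothetical stand-ins for the real node classes defined in nodes.py.
class StyleModelLoader: ...
class CLIPVisionLoader: ...

NODE_CLASS_MAPPINGS = {
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
}

def make_node(class_type):
    # A workflow refers to nodes by display name; the mapping resolves it to a class.
    return NODE_CLASS_MAPPINGS[class_type]()

node = make_node("CLIPVisionLoader")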