mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2025-01-10 18:05:16 +00:00
809bcc8ceb
See _for_testing/unclip in the UI for the new nodes. unCLIPCheckpointLoader is used to load them. unCLIPConditioning is used to add the image cond and takes as input a CLIPVisionEncode output which has been moved to the conditioning section.
19 lines
419 B
JSON
19 lines
419 B
JSON
{
  "attention_dropout": 0.0,
  "dropout": 0.0,
  "hidden_act": "gelu",
  "hidden_size": 1280,
  "image_size": 224,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "intermediate_size": 5120,
  "layer_norm_eps": 1e-05,
  "model_type": "clip_vision_model",
  "num_attention_heads": 16,
  "num_channels": 3,
  "num_hidden_layers": 32,
  "patch_size": 14,
  "projection_dim": 1024,
  "torch_dtype": "float32"
}