Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-01-25 15:55:18 +00:00)

Merge branch 'comfyanonymous:master' into sa_solver
Commit 70ff03429c

.github/workflows/stable-release.yml (vendored): 4 changed lines
@@ -17,12 +17,12 @@ on:
         description: 'Python minor version'
         required: true
         type: string
-        default: "11"
+        default: "12"
       python_patch:
         description: 'Python patch version'
         required: true
         type: string
-        default: "9"
+        default: "7"
 
 
 jobs:
@@ -12,7 +12,7 @@ on:
         description: 'extra dependencies'
         required: false
         type: string
-        default: "\"numpy<2\""
+        default: ""
       cu:
         description: 'cuda version'
         required: true
@@ -23,13 +23,13 @@ on:
         description: 'python minor version'
         required: true
         type: string
-        default: "11"
+        default: "12"
 
       python_patch:
         description: 'python patch version'
         required: true
         type: string
-        default: "9"
+        default: "7"
 #  push:
 #    branches:
 #      - master
@@ -13,13 +13,13 @@ on:
         description: 'python minor version'
         required: true
         type: string
-        default: "11"
+        default: "12"
 
       python_patch:
         description: 'python patch version'
         required: true
         type: string
-        default: "9"
+        default: "7"
 #  push:
 #    branches:
 #      - master
@@ -127,6 +127,8 @@ To run it on services like paperspace, kaggle or colab you can use my [Jupyter N
 
 ## Manual Install (Windows, Linux)
 
+Note that some dependencies do not yet support python 3.13 so using 3.12 is recommended.
+
 Git clone this repo.
 
 Put your SD checkpoints (the huge ckpt/safetensors files) in: models/checkpoints
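Since the install notes now steer users toward Python 3.12, here is a small illustrative guard (not part of the repository) that a launcher script could use to fail fast on an unsupported interpreter:

import sys

# README: some dependencies do not yet support python 3.13, so 3.12 is recommended.
if sys.version_info >= (3, 13):
    raise SystemExit(f"Python {sys.version.split()[0]} detected; several dependencies "
                     "do not yet ship 3.13 wheels, use 3.12 instead.")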
@@ -151,6 +151,15 @@ class FrontendManager:
             return cls.DEFAULT_FRONTEND_PATH
 
         repo_owner, repo_name, version = cls.parse_version_string(version_string)
+
+        if version.startswith("v"):
+            expected_path = str(Path(cls.CUSTOM_FRONTENDS_ROOT) / f"{repo_owner}_{repo_name}" / version.lstrip("v"))
+            if os.path.exists(expected_path):
+                logging.info(f"Using existing copy of specific frontend version tag: {repo_owner}/{repo_name}@{version}")
+                return expected_path
+
+        logging.info(f"Initializing frontend: {repo_owner}/{repo_name}@{version}, requesting version details from GitHub...")
+
         provider = provider or FrontEndProvider(repo_owner, repo_name)
         release = provider.get_release(version)
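The added branch skips the GitHub release lookup when a pinned version tag is already cached on disk. A rough sketch of the path it probes, assuming a version string of the form owner/repo@vX.Y.Z and using "web_custom_versions" as a stand-in for FrontendManager.CUSTOM_FRONTENDS_ROOT (both the root directory value and the helper below are illustrative, not the repository's API):

from pathlib import Path

CUSTOM_FRONTENDS_ROOT = Path("web_custom_versions")  # assumed value, for illustration only

def cached_frontend_path(version_string: str) -> Path:
    # "Comfy-Org/ComfyUI_frontend@v1.2.3" -> web_custom_versions/Comfy-Org_ComfyUI_frontend/1.2.3
    repo, _, version = version_string.partition("@")
    owner, name = repo.split("/")
    return CUSTOM_FRONTENDS_ROOT / f"{owner}_{name}" / version.lstrip("v")

If that directory exists, the new code returns it directly instead of asking the provider for release details.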
@@ -60,7 +60,7 @@ class StrengthType(Enum):
     LINEAR_UP = 2
 
 class ControlBase:
-    def __init__(self, device=None):
+    def __init__(self):
         self.cond_hint_original = None
         self.cond_hint = None
         self.strength = 1.0
@@ -72,10 +72,6 @@ class ControlBase:
         self.compression_ratio = 8
         self.upscale_algorithm = 'nearest-exact'
         self.extra_args = {}
-
-        if device is None:
-            device = comfy.model_management.get_torch_device()
-        self.device = device
         self.previous_controlnet = None
         self.extra_conds = []
         self.strength_type = StrengthType.CONSTANT
@@ -185,8 +181,8 @@ class ControlBase:
 
 
 class ControlNet(ControlBase):
-    def __init__(self, control_model=None, global_average_pooling=False, compression_ratio=8, latent_format=None, device=None, load_device=None, manual_cast_dtype=None, extra_conds=["y"], strength_type=StrengthType.CONSTANT, concat_mask=False):
-        super().__init__(device)
+    def __init__(self, control_model=None, global_average_pooling=False, compression_ratio=8, latent_format=None, load_device=None, manual_cast_dtype=None, extra_conds=["y"], strength_type=StrengthType.CONSTANT, concat_mask=False):
+        super().__init__()
         self.control_model = control_model
         self.load_device = load_device
         if control_model is not None:
@@ -242,7 +238,7 @@ class ControlNet(ControlBase):
                     to_concat.append(comfy.utils.repeat_to_batch_size(c, self.cond_hint.shape[0]))
                 self.cond_hint = torch.cat([self.cond_hint] + to_concat, dim=1)
 
-            self.cond_hint = self.cond_hint.to(device=self.device, dtype=dtype)
+            self.cond_hint = self.cond_hint.to(device=x_noisy.device, dtype=dtype)
         if x_noisy.shape[0] != self.cond_hint.shape[0]:
             self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
 
@@ -341,8 +337,8 @@ class ControlLoraOps:
 
 
 class ControlLora(ControlNet):
-    def __init__(self, control_weights, global_average_pooling=False, device=None, model_options={}): #TODO? model_options
-        ControlBase.__init__(self, device)
+    def __init__(self, control_weights, global_average_pooling=False, model_options={}): #TODO? model_options
+        ControlBase.__init__(self)
         self.control_weights = control_weights
         self.global_average_pooling = global_average_pooling
         self.extra_conds += ["y"]
@@ -662,12 +658,15 @@ def load_controlnet(ckpt_path, model=None, model_options={}):
 
 
 class T2IAdapter(ControlBase):
     def __init__(self, t2i_model, channels_in, compression_ratio, upscale_algorithm, device=None):
-        super().__init__(device)
+        super().__init__()
         self.t2i_model = t2i_model
         self.channels_in = channels_in
         self.control_input = None
         self.compression_ratio = compression_ratio
         self.upscale_algorithm = upscale_algorithm
+        if device is None:
+            device = comfy.model_management.get_torch_device()
+        self.device = device
 
     def scale_image_to(self, width, height):
         unshuffle_amount = self.t2i_model.unshuffle_amount
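The common thread in this file is that ControlBase no longer fixes a torch device at construction time; control hints now follow the device of the latent they are applied to. A minimal sketch of the pattern (tensor names are made up for illustration):

import torch

def apply_hint(cond_hint: torch.Tensor, x_noisy: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    # Move the hint to the sampling tensor's device at call time
    # instead of a device chosen once in __init__.
    return cond_hint.to(device=x_noisy.device, dtype=dtype)

T2IAdapter keeps its own device attribute, which is why the removed device-selection logic reappears in its __init__.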
@@ -41,6 +41,8 @@ def manual_stochastic_round_to_float8(x, dtype, generator=None):
         (2.0 ** (-EXPONENT_BIAS + 1)) * abs_x
     )
 
+    inf = torch.finfo(dtype)
+    torch.clamp(sign, min=inf.min, max=inf.max, out=sign)
     return sign
 
 
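The two added lines clamp the stochastically rounded values into the representable range of the target float8 dtype. For reference, torch.finfo reports these ranges on a PyTorch build with float8 support:

import torch

for dtype in (torch.float8_e4m3fn, torch.float8_e5m2):
    info = torch.finfo(dtype)
    print(dtype, info.min, info.max)  # e4m3fn: -448.0 / 448.0, e5m2: -57344.0 / 57344.0

x = torch.randn(4) * 1000
info = torch.finfo(torch.float8_e4m3fn)
x = torch.clamp(x, min=info.min, max=info.max)  # keep out-of-range values finite before casting down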
@@ -166,6 +166,8 @@ def sample_euler(model, x, sigmas, extra_args=None, callback=None, disable=None,
 
 @torch.no_grad()
 def sample_euler_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
+    if isinstance(model.inner_model.inner_model.model_sampling, comfy.model_sampling.CONST):
+        return sample_euler_ancestral_RF(model, x, sigmas, extra_args, callback, disable, eta, s_noise, noise_sampler)
     """Ancestral sampling with Euler method steps."""
     extra_args = {} if extra_args is None else extra_args
     noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
@@ -183,6 +185,29 @@ def sample_euler_ancestral(model, x, sigmas, extra_args=None, callback=None, dis
             x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
     return x
 
+@torch.no_grad()
+def sample_euler_ancestral_RF(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1., noise_sampler=None):
+    """Ancestral sampling with Euler method steps."""
+    extra_args = {} if extra_args is None else extra_args
+    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+    s_in = x.new_ones([x.shape[0]])
+    for i in trange(len(sigmas) - 1, disable=disable):
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+        # sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
+        downstep_ratio = 1 + (sigmas[i+1]/sigmas[i] - 1) * eta
+        sigma_down = sigmas[i+1] * downstep_ratio
+        alpha_ip1 = 1 - sigmas[i+1]
+        alpha_down = 1 - sigma_down
+        renoise_coeff = (sigmas[i+1]**2 - sigma_down**2*alpha_ip1**2/alpha_down**2)**0.5
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+        # Euler method
+        sigma_down_i_ratio = sigma_down / sigmas[i]
+        x = sigma_down_i_ratio * x + (1 - sigma_down_i_ratio) * denoised
+        if sigmas[i + 1] > 0 and eta > 0:
+            x = (alpha_ip1/alpha_down) * x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * renoise_coeff
+    return x
+
 @torch.no_grad()
 def sample_heun(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
@@ -1192,7 +1217,6 @@ def sample_euler_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disabl
         d = to_d(x, sigma_hat, temp[0])
         if callback is not None:
             callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
-        dt = sigmas[i + 1] - sigma_hat
         # Euler method
         x = denoised + d * sigmas[i + 1]
     return x
@@ -1219,7 +1243,6 @@ def sample_euler_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=No
             callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
         d = to_d(x, sigmas[i], temp[0])
         # Euler method
-        dt = sigma_down - sigmas[i]
         x = denoised + d * sigma_down
         if sigmas[i + 1] > 0:
             x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
@@ -1250,7 +1273,6 @@ def sample_dpmpp_2s_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback
         if sigma_down == 0:
             # Euler method
             d = to_d(x, sigmas[i], temp[0])
-            dt = sigma_down - sigmas[i]
             x = denoised + d * sigma_down
         else:
             # DPM-Solver++(2S)
@@ -1298,4 +1320,4 @@ def sample_dpmpp_2m_cfg_pp(model, x, sigmas, extra_args=None, callback=None, dis
             denoised_mix = -torch.exp(-h) * uncond_denoised - torch.expm1(-h) * (1 / (2 * r)) * (denoised - old_uncond_denoised)
             x = denoised + denoised_mix + torch.exp(-h) * x
         old_uncond_denoised = uncond_denoised
-        return x
+    return x
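Reading the new sample_euler_ancestral_RF back as formulas (a restatement of the code above for the rectified-flow / CONST model-sampling case, where $\alpha = 1 - \sigma$):

$\sigma_{\mathrm{down}} = \sigma_{i+1}\left(1 + \eta\left(\tfrac{\sigma_{i+1}}{\sigma_i} - 1\right)\right), \qquad \alpha_{i+1} = 1 - \sigma_{i+1}, \qquad \alpha_{\mathrm{down}} = 1 - \sigma_{\mathrm{down}}$

$x \leftarrow \tfrac{\sigma_{\mathrm{down}}}{\sigma_i}\, x + \left(1 - \tfrac{\sigma_{\mathrm{down}}}{\sigma_i}\right)\hat{x}_0, \qquad x \leftarrow \tfrac{\alpha_{i+1}}{\alpha_{\mathrm{down}}}\, x + s_{\mathrm{noise}}\,\epsilon\,\sqrt{\sigma_{i+1}^2 - \sigma_{\mathrm{down}}^2\,\tfrac{\alpha_{i+1}^2}{\alpha_{\mathrm{down}}^2}}$

where $\hat{x}_0$ is the model's denoised prediction and $\epsilon$ is fresh noise; the renoising step only runs when $\sigma_{i+1} > 0$ and $\eta > 0$, and $\eta = 0$ reduces the update to a plain Euler step.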
@@ -175,3 +175,30 @@ class Flux(SD3):
 
     def process_out(self, latent):
         return (latent / self.scale_factor) + self.shift_factor
+
+class Mochi(LatentFormat):
+    latent_channels = 12
+
+    def __init__(self):
+        self.scale_factor = 1.0
+        self.latents_mean = torch.tensor([-0.06730895953510081, -0.038011381506090416, -0.07477820912866141,
+                                          -0.05565264470995561, 0.012767231469026969, -0.04703542746246419,
+                                          0.043896967884726704, -0.09346305707025976, -0.09918314763016893,
+                                          -0.008729793427399178, -0.011931556316503654, -0.0321993391887285]).view(1, self.latent_channels, 1, 1, 1)
+        self.latents_std = torch.tensor([0.9263795028493863, 0.9248894543193766, 0.9393059390890617,
+                                         0.959253732819592, 0.8244560132752793, 0.917259975397747,
+                                         0.9294154431013696, 1.3720942357788521, 0.881393668867029,
+                                         0.9168315692124348, 0.9185249279345552, 0.9274757570805041]).view(1, self.latent_channels, 1, 1, 1)
+
+        self.latent_rgb_factors = None #TODO
+        self.taesd_decoder_name = None #TODO
+
+    def process_in(self, latent):
+        latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+        latents_std = self.latents_std.to(latent.device, latent.dtype)
+        return (latent - latents_mean) * self.scale_factor / latents_std
+
+    def process_out(self, latent):
+        latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+        latents_std = self.latents_std.to(latent.device, latent.dtype)
+        return latent * latents_std / self.scale_factor + latents_mean
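process_in and process_out in the new Mochi format are a per-channel standardization and its exact inverse. A standalone sketch of the round trip, with random stand-ins for the hard-coded channel statistics:

import torch

latent_channels = 12
mean = torch.randn(1, latent_channels, 1, 1, 1)       # stand-in for latents_mean
std = torch.rand(1, latent_channels, 1, 1, 1) + 0.5   # stand-in for latents_std
scale_factor = 1.0

def process_in(latent):
    return (latent - mean) * scale_factor / std

def process_out(latent):
    return latent * std / scale_factor + mean

latent = torch.randn(2, latent_channels, 3, 8, 8)     # (B, C, T, H, W) video latent
assert torch.allclose(process_out(process_in(latent)), latent, atol=1e-5)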
@@ -13,9 +13,15 @@ try:
 except:
     rms_norm_torch = None
 
-def rms_norm(x, weight, eps=1e-6):
+def rms_norm(x, weight=None, eps=1e-6):
     if rms_norm_torch is not None and not (torch.jit.is_tracing() or torch.jit.is_scripting()):
-        return rms_norm_torch(x, weight.shape, weight=comfy.ops.cast_to(weight, dtype=x.dtype, device=x.device), eps=eps)
+        if weight is None:
+            return rms_norm_torch(x, (x.shape[-1],), eps=eps)
+        else:
+            return rms_norm_torch(x, weight.shape, weight=comfy.ops.cast_to(weight, dtype=x.dtype, device=x.device), eps=eps)
     else:
-        rrms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + eps)
-        return (x * rrms) * comfy.ops.cast_to(weight, dtype=x.dtype, device=x.device)
+        r = x * torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + eps)
+        if weight is None:
+            return r
+        else:
+            return r * comfy.ops.cast_to(weight, dtype=x.dtype, device=x.device)
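The weight argument of rms_norm becomes optional so callers can request a plain, unscaled RMS norm; the weighted result is just the unweighted one multiplied elementwise by the weight. A tiny check of that equivalence against the pure-PyTorch fallback branch:

import torch

def rms_norm_ref(x, weight=None, eps=1e-6):
    r = x * torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + eps)
    return r if weight is None else r * weight

x = torch.randn(2, 8)
w = torch.full((8,), 0.5)
assert torch.allclose(rms_norm_ref(x, w), rms_norm_ref(x) * w)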
comfy/ldm/genmo/joint_model/asymm_models_joint.py (new file, 541 lines)
@@ -0,0 +1,541 @@
|
||||
#original code from https://github.com/genmoai/models under apache 2.0 license
|
||||
#adapted to ComfyUI
|
||||
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from einops import rearrange
|
||||
# from flash_attn import flash_attn_varlen_qkvpacked_func
|
||||
from comfy.ldm.modules.attention import optimized_attention
|
||||
|
||||
from .layers import (
|
||||
FeedForward,
|
||||
PatchEmbed,
|
||||
RMSNorm,
|
||||
TimestepEmbedder,
|
||||
)
|
||||
|
||||
from .rope_mixed import (
|
||||
compute_mixed_rotation,
|
||||
create_position_matrix,
|
||||
)
|
||||
from .temporal_rope import apply_rotary_emb_qk_real
|
||||
from .utils import (
|
||||
AttentionPool,
|
||||
modulate,
|
||||
)
|
||||
|
||||
import comfy.ldm.common_dit
|
||||
import comfy.ops
|
||||
|
||||
|
||||
def modulated_rmsnorm(x, scale, eps=1e-6):
|
||||
# Normalize and modulate
|
||||
x_normed = comfy.ldm.common_dit.rms_norm(x, eps=eps)
|
||||
x_modulated = x_normed * (1 + scale.unsqueeze(1))
|
||||
|
||||
return x_modulated
|
||||
|
||||
|
||||
def residual_tanh_gated_rmsnorm(x, x_res, gate, eps=1e-6):
|
||||
# Apply tanh to gate
|
||||
tanh_gate = torch.tanh(gate).unsqueeze(1)
|
||||
|
||||
# Normalize and apply gated scaling
|
||||
x_normed = comfy.ldm.common_dit.rms_norm(x_res, eps=eps) * tanh_gate
|
||||
|
||||
# Apply residual connection
|
||||
output = x + x_normed
|
||||
|
||||
return output
|
||||
|
||||
class AsymmetricAttention(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
dim_x: int,
|
||||
dim_y: int,
|
||||
num_heads: int = 8,
|
||||
qkv_bias: bool = True,
|
||||
qk_norm: bool = False,
|
||||
attn_drop: float = 0.0,
|
||||
update_y: bool = True,
|
||||
out_bias: bool = True,
|
||||
attend_to_padding: bool = False,
|
||||
softmax_scale: Optional[float] = None,
|
||||
device: Optional[torch.device] = None,
|
||||
dtype=None,
|
||||
operations=None,
|
||||
):
|
||||
super().__init__()
|
||||
self.dim_x = dim_x
|
||||
self.dim_y = dim_y
|
||||
self.num_heads = num_heads
|
||||
self.head_dim = dim_x // num_heads
|
||||
self.attn_drop = attn_drop
|
||||
self.update_y = update_y
|
||||
self.attend_to_padding = attend_to_padding
|
||||
self.softmax_scale = softmax_scale
|
||||
if dim_x % num_heads != 0:
|
||||
raise ValueError(
|
||||
f"dim_x={dim_x} should be divisible by num_heads={num_heads}"
|
||||
)
|
||||
|
||||
# Input layers.
|
||||
self.qkv_bias = qkv_bias
|
||||
self.qkv_x = operations.Linear(dim_x, 3 * dim_x, bias=qkv_bias, device=device, dtype=dtype)
|
||||
# Project text features to match visual features (dim_y -> dim_x)
|
||||
self.qkv_y = operations.Linear(dim_y, 3 * dim_x, bias=qkv_bias, device=device, dtype=dtype)
|
||||
|
||||
# Query and key normalization for stability.
|
||||
assert qk_norm
|
||||
self.q_norm_x = RMSNorm(self.head_dim, device=device, dtype=dtype)
|
||||
self.k_norm_x = RMSNorm(self.head_dim, device=device, dtype=dtype)
|
||||
self.q_norm_y = RMSNorm(self.head_dim, device=device, dtype=dtype)
|
||||
self.k_norm_y = RMSNorm(self.head_dim, device=device, dtype=dtype)
|
||||
|
||||
# Output layers. y features go back down from dim_x -> dim_y.
|
||||
self.proj_x = operations.Linear(dim_x, dim_x, bias=out_bias, device=device, dtype=dtype)
|
||||
self.proj_y = (
|
||||
operations.Linear(dim_x, dim_y, bias=out_bias, device=device, dtype=dtype)
|
||||
if update_y
|
||||
else nn.Identity()
|
||||
)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: torch.Tensor, # (B, N, dim_x)
|
||||
y: torch.Tensor, # (B, L, dim_y)
|
||||
scale_x: torch.Tensor, # (B, dim_x), modulation for pre-RMSNorm.
|
||||
scale_y: torch.Tensor, # (B, dim_y), modulation for pre-RMSNorm.
|
||||
crop_y,
|
||||
**rope_rotation,
|
||||
) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
rope_cos = rope_rotation.get("rope_cos")
|
||||
rope_sin = rope_rotation.get("rope_sin")
|
||||
# Pre-norm for visual features
|
||||
x = modulated_rmsnorm(x, scale_x) # (B, M, dim_x) where M = N / cp_group_size
|
||||
|
||||
# Process visual features
|
||||
# qkv_x = self.qkv_x(x) # (B, M, 3 * dim_x)
|
||||
# assert qkv_x.dtype == torch.bfloat16
|
||||
# qkv_x = all_to_all_collect_tokens(
|
||||
# qkv_x, self.num_heads
|
||||
# ) # (3, B, N, local_h, head_dim)
|
||||
|
||||
# Process text features
|
||||
y = modulated_rmsnorm(y, scale_y) # (B, L, dim_y)
|
||||
q_y, k_y, v_y = self.qkv_y(y).view(y.shape[0], y.shape[1], 3, self.num_heads, -1).unbind(2) # (B, N, local_h, head_dim)
|
||||
|
||||
q_y = self.q_norm_y(q_y)
|
||||
k_y = self.k_norm_y(k_y)
|
||||
|
||||
# Split qkv_x into q, k, v
|
||||
q_x, k_x, v_x = self.qkv_x(x).view(x.shape[0], x.shape[1], 3, self.num_heads, -1).unbind(2) # (B, N, local_h, head_dim)
|
||||
q_x = self.q_norm_x(q_x)
|
||||
q_x = apply_rotary_emb_qk_real(q_x, rope_cos, rope_sin)
|
||||
k_x = self.k_norm_x(k_x)
|
||||
k_x = apply_rotary_emb_qk_real(k_x, rope_cos, rope_sin)
|
||||
|
||||
q = torch.cat([q_x, q_y[:, :crop_y]], dim=1).transpose(1, 2)
|
||||
k = torch.cat([k_x, k_y[:, :crop_y]], dim=1).transpose(1, 2)
|
||||
v = torch.cat([v_x, v_y[:, :crop_y]], dim=1).transpose(1, 2)
|
||||
|
||||
xy = optimized_attention(q,
|
||||
k,
|
||||
v, self.num_heads, skip_reshape=True)
|
||||
|
||||
x, y = torch.tensor_split(xy, (q_x.shape[1],), dim=1)
|
||||
x = self.proj_x(x)
|
||||
o = torch.zeros(y.shape[0], q_y.shape[1], y.shape[-1], device=y.device, dtype=y.dtype)
|
||||
o[:, :y.shape[1]] = y
|
||||
|
||||
y = self.proj_y(o)
|
||||
# print("ox", x)
|
||||
# print("oy", y)
|
||||
return x, y
|
||||
|
||||
|
||||
class AsymmetricJointBlock(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
hidden_size_x: int,
|
||||
hidden_size_y: int,
|
||||
num_heads: int,
|
||||
*,
|
||||
mlp_ratio_x: float = 8.0, # Ratio of hidden size to d_model for MLP for visual tokens.
|
||||
mlp_ratio_y: float = 4.0, # Ratio of hidden size to d_model for MLP for text tokens.
|
||||
update_y: bool = True, # Whether to update text tokens in this block.
|
||||
device: Optional[torch.device] = None,
|
||||
dtype=None,
|
||||
operations=None,
|
||||
**block_kwargs,
|
||||
):
|
||||
super().__init__()
|
||||
self.update_y = update_y
|
||||
self.hidden_size_x = hidden_size_x
|
||||
self.hidden_size_y = hidden_size_y
|
||||
self.mod_x = operations.Linear(hidden_size_x, 4 * hidden_size_x, device=device, dtype=dtype)
|
||||
if self.update_y:
|
||||
self.mod_y = operations.Linear(hidden_size_x, 4 * hidden_size_y, device=device, dtype=dtype)
|
||||
else:
|
||||
self.mod_y = operations.Linear(hidden_size_x, hidden_size_y, device=device, dtype=dtype)
|
||||
|
||||
# Self-attention:
|
||||
self.attn = AsymmetricAttention(
|
||||
hidden_size_x,
|
||||
hidden_size_y,
|
||||
num_heads=num_heads,
|
||||
update_y=update_y,
|
||||
device=device,
|
||||
dtype=dtype,
|
||||
operations=operations,
|
||||
**block_kwargs,
|
||||
)
|
||||
|
||||
# MLP.
|
||||
mlp_hidden_dim_x = int(hidden_size_x * mlp_ratio_x)
|
||||
assert mlp_hidden_dim_x == int(1536 * 8)
|
||||
self.mlp_x = FeedForward(
|
||||
in_features=hidden_size_x,
|
||||
hidden_size=mlp_hidden_dim_x,
|
||||
multiple_of=256,
|
||||
ffn_dim_multiplier=None,
|
||||
device=device,
|
||||
dtype=dtype,
|
||||
operations=operations,
|
||||
)
|
||||
|
||||
# MLP for text not needed in last block.
|
||||
if self.update_y:
|
||||
mlp_hidden_dim_y = int(hidden_size_y * mlp_ratio_y)
|
||||
self.mlp_y = FeedForward(
|
||||
in_features=hidden_size_y,
|
||||
hidden_size=mlp_hidden_dim_y,
|
||||
multiple_of=256,
|
||||
ffn_dim_multiplier=None,
|
||||
device=device,
|
||||
dtype=dtype,
|
||||
operations=operations,
|
||||
)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
c: torch.Tensor,
|
||||
y: torch.Tensor,
|
||||
**attn_kwargs,
|
||||
):
|
||||
"""Forward pass of a block.
|
||||
|
||||
Args:
|
||||
x: (B, N, dim) tensor of visual tokens
|
||||
c: (B, dim) tensor of conditioned features
|
||||
y: (B, L, dim) tensor of text tokens
|
||||
num_frames: Number of frames in the video. N = num_frames * num_spatial_tokens
|
||||
|
||||
Returns:
|
||||
x: (B, N, dim) tensor of visual tokens after block
|
||||
y: (B, L, dim) tensor of text tokens after block
|
||||
"""
|
||||
N = x.size(1)
|
||||
|
||||
c = F.silu(c)
|
||||
mod_x = self.mod_x(c)
|
||||
scale_msa_x, gate_msa_x, scale_mlp_x, gate_mlp_x = mod_x.chunk(4, dim=1)
|
||||
|
||||
mod_y = self.mod_y(c)
|
||||
if self.update_y:
|
||||
scale_msa_y, gate_msa_y, scale_mlp_y, gate_mlp_y = mod_y.chunk(4, dim=1)
|
||||
else:
|
||||
scale_msa_y = mod_y
|
||||
|
||||
# Self-attention block.
|
||||
x_attn, y_attn = self.attn(
|
||||
x,
|
||||
y,
|
||||
scale_x=scale_msa_x,
|
||||
scale_y=scale_msa_y,
|
||||
**attn_kwargs,
|
||||
)
|
||||
|
||||
assert x_attn.size(1) == N
|
||||
x = residual_tanh_gated_rmsnorm(x, x_attn, gate_msa_x)
|
||||
if self.update_y:
|
||||
y = residual_tanh_gated_rmsnorm(y, y_attn, gate_msa_y)
|
||||
|
||||
# MLP block.
|
||||
x = self.ff_block_x(x, scale_mlp_x, gate_mlp_x)
|
||||
if self.update_y:
|
||||
y = self.ff_block_y(y, scale_mlp_y, gate_mlp_y)
|
||||
|
||||
return x, y
|
||||
|
||||
def ff_block_x(self, x, scale_x, gate_x):
|
||||
x_mod = modulated_rmsnorm(x, scale_x)
|
||||
x_res = self.mlp_x(x_mod)
|
||||
x = residual_tanh_gated_rmsnorm(x, x_res, gate_x) # Sandwich norm
|
||||
return x
|
||||
|
||||
def ff_block_y(self, y, scale_y, gate_y):
|
||||
y_mod = modulated_rmsnorm(y, scale_y)
|
||||
y_res = self.mlp_y(y_mod)
|
||||
y = residual_tanh_gated_rmsnorm(y, y_res, gate_y) # Sandwich norm
|
||||
return y
|
||||
|
||||
|
||||
class FinalLayer(nn.Module):
|
||||
"""
|
||||
The final layer of DiT.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
hidden_size,
|
||||
patch_size,
|
||||
out_channels,
|
||||
device: Optional[torch.device] = None,
|
||||
dtype=None,
|
||||
operations=None,
|
||||
):
|
||||
super().__init__()
|
||||
self.norm_final = operations.LayerNorm(
|
||||
hidden_size, elementwise_affine=False, eps=1e-6, device=device, dtype=dtype
|
||||
)
|
||||
self.mod = operations.Linear(hidden_size, 2 * hidden_size, device=device, dtype=dtype)
|
||||
self.linear = operations.Linear(
|
||||
hidden_size, patch_size * patch_size * out_channels, device=device, dtype=dtype
|
||||
)
|
||||
|
||||
def forward(self, x, c):
|
||||
c = F.silu(c)
|
||||
shift, scale = self.mod(c).chunk(2, dim=1)
|
||||
x = modulate(self.norm_final(x), shift, scale)
|
||||
x = self.linear(x)
|
||||
return x
|
||||
|
||||
|
||||
class AsymmDiTJoint(nn.Module):
|
||||
"""
|
||||
Diffusion model with a Transformer backbone.
|
||||
|
||||
Ingests text embeddings instead of a label.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
patch_size=2,
|
||||
in_channels=4,
|
||||
hidden_size_x=1152,
|
||||
hidden_size_y=1152,
|
||||
depth=48,
|
||||
num_heads=16,
|
||||
mlp_ratio_x=8.0,
|
||||
mlp_ratio_y=4.0,
|
||||
use_t5: bool = False,
|
||||
t5_feat_dim: int = 4096,
|
||||
t5_token_length: int = 256,
|
||||
learn_sigma=True,
|
||||
patch_embed_bias: bool = True,
|
||||
timestep_mlp_bias: bool = True,
|
||||
attend_to_padding: bool = False,
|
||||
timestep_scale: Optional[float] = None,
|
||||
use_extended_posenc: bool = False,
|
||||
posenc_preserve_area: bool = False,
|
||||
rope_theta: float = 10000.0,
|
||||
image_model=None,
|
||||
device: Optional[torch.device] = None,
|
||||
dtype=None,
|
||||
operations=None,
|
||||
**block_kwargs,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
self.dtype = dtype
|
||||
self.learn_sigma = learn_sigma
|
||||
self.in_channels = in_channels
|
||||
self.out_channels = in_channels * 2 if learn_sigma else in_channels
|
||||
self.patch_size = patch_size
|
||||
self.num_heads = num_heads
|
||||
self.hidden_size_x = hidden_size_x
|
||||
self.hidden_size_y = hidden_size_y
|
||||
self.head_dim = (
|
||||
hidden_size_x // num_heads
|
||||
) # Head dimension and count is determined by visual.
|
||||
self.attend_to_padding = attend_to_padding
|
||||
self.use_extended_posenc = use_extended_posenc
|
||||
self.posenc_preserve_area = posenc_preserve_area
|
||||
self.use_t5 = use_t5
|
||||
self.t5_token_length = t5_token_length
|
||||
self.t5_feat_dim = t5_feat_dim
|
||||
self.rope_theta = (
|
||||
rope_theta # Scaling factor for frequency computation for temporal RoPE.
|
||||
)
|
||||
|
||||
self.x_embedder = PatchEmbed(
|
||||
patch_size=patch_size,
|
||||
in_chans=in_channels,
|
||||
embed_dim=hidden_size_x,
|
||||
bias=patch_embed_bias,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
operations=operations
|
||||
)
|
||||
# Conditionings
|
||||
# Timestep
|
||||
self.t_embedder = TimestepEmbedder(
|
||||
hidden_size_x, bias=timestep_mlp_bias, timestep_scale=timestep_scale, dtype=dtype, device=device, operations=operations
|
||||
)
|
||||
|
||||
if self.use_t5:
|
||||
# Caption Pooling (T5)
|
||||
self.t5_y_embedder = AttentionPool(
|
||||
t5_feat_dim, num_heads=8, output_dim=hidden_size_x, dtype=dtype, device=device, operations=operations
|
||||
)
|
||||
|
||||
# Dense Embedding Projection (T5)
|
||||
self.t5_yproj = operations.Linear(
|
||||
t5_feat_dim, hidden_size_y, bias=True, dtype=dtype, device=device
|
||||
)
|
||||
|
||||
# Initialize pos_frequencies as an empty parameter.
|
||||
self.pos_frequencies = nn.Parameter(
|
||||
torch.empty(3, self.num_heads, self.head_dim // 2, dtype=dtype, device=device)
|
||||
)
|
||||
|
||||
assert not self.attend_to_padding
|
||||
|
||||
# for depth 48:
|
||||
# b = 0: AsymmetricJointBlock, update_y=True
|
||||
# b = 1: AsymmetricJointBlock, update_y=True
|
||||
# ...
|
||||
# b = 46: AsymmetricJointBlock, update_y=True
|
||||
# b = 47: AsymmetricJointBlock, update_y=False. No need to update text features.
|
||||
blocks = []
|
||||
for b in range(depth):
|
||||
# Joint multi-modal block
|
||||
update_y = b < depth - 1
|
||||
block = AsymmetricJointBlock(
|
||||
hidden_size_x,
|
||||
hidden_size_y,
|
||||
num_heads,
|
||||
mlp_ratio_x=mlp_ratio_x,
|
||||
mlp_ratio_y=mlp_ratio_y,
|
||||
update_y=update_y,
|
||||
attend_to_padding=attend_to_padding,
|
||||
device=device,
|
||||
dtype=dtype,
|
||||
operations=operations,
|
||||
**block_kwargs,
|
||||
)
|
||||
|
||||
blocks.append(block)
|
||||
self.blocks = nn.ModuleList(blocks)
|
||||
|
||||
self.final_layer = FinalLayer(
|
||||
hidden_size_x, patch_size, self.out_channels, dtype=dtype, device=device, operations=operations
|
||||
)
|
||||
|
||||
def embed_x(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""
|
||||
Args:
|
||||
x: (B, C=12, T, H, W) tensor of visual tokens
|
||||
|
||||
Returns:
|
||||
x: (B, C=3072, N) tensor of visual tokens with positional embedding.
|
||||
"""
|
||||
return self.x_embedder(x) # Convert BcTHW to BCN
|
||||
|
||||
def prepare(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
sigma: torch.Tensor,
|
||||
t5_feat: torch.Tensor,
|
||||
t5_mask: torch.Tensor,
|
||||
):
|
||||
"""Prepare input and conditioning embeddings."""
|
||||
# Visual patch embeddings with positional encoding.
|
||||
T, H, W = x.shape[-3:]
|
||||
pH, pW = H // self.patch_size, W // self.patch_size
|
||||
x = self.embed_x(x) # (B, N, D), where N = T * H * W / patch_size ** 2
|
||||
assert x.ndim == 3
|
||||
B = x.size(0)
|
||||
|
||||
|
||||
pH, pW = H // self.patch_size, W // self.patch_size
|
||||
N = T * pH * pW
|
||||
assert x.size(1) == N
|
||||
pos = create_position_matrix(
|
||||
T, pH=pH, pW=pW, device=x.device, dtype=torch.float32
|
||||
) # (N, 3)
|
||||
rope_cos, rope_sin = compute_mixed_rotation(
|
||||
freqs=comfy.ops.cast_to(self.pos_frequencies, dtype=x.dtype, device=x.device), pos=pos
|
||||
) # Each are (N, num_heads, dim // 2)
|
||||
|
||||
c_t = self.t_embedder(1 - sigma, out_dtype=x.dtype) # (B, D)
|
||||
|
||||
t5_y_pool = self.t5_y_embedder(t5_feat, t5_mask) # (B, D)
|
||||
|
||||
c = c_t + t5_y_pool
|
||||
|
||||
y_feat = self.t5_yproj(t5_feat) # (B, L, t5_feat_dim) --> (B, L, D)
|
||||
|
||||
return x, c, y_feat, rope_cos, rope_sin
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
timestep: torch.Tensor,
|
||||
context: List[torch.Tensor],
|
||||
attention_mask: List[torch.Tensor],
|
||||
num_tokens=256,
|
||||
packed_indices: Dict[str, torch.Tensor] = None,
|
||||
rope_cos: torch.Tensor = None,
|
||||
rope_sin: torch.Tensor = None,
|
||||
control=None, **kwargs
|
||||
):
|
||||
y_feat = context
|
||||
y_mask = attention_mask
|
||||
sigma = timestep
|
||||
"""Forward pass of DiT.
|
||||
|
||||
Args:
|
||||
x: (B, C, T, H, W) tensor of spatial inputs (images or latent representations of images)
|
||||
sigma: (B,) tensor of noise standard deviations
|
||||
y_feat: List((B, L, y_feat_dim) tensor of caption token features. For SDXL text encoders: L=77, y_feat_dim=2048)
|
||||
y_mask: List((B, L) boolean tensor indicating which tokens are not padding)
|
||||
packed_indices: Dict with keys for Flash Attention. Result of compute_packed_indices.
|
||||
"""
|
||||
B, _, T, H, W = x.shape
|
||||
|
||||
x, c, y_feat, rope_cos, rope_sin = self.prepare(
|
||||
x, sigma, y_feat, y_mask
|
||||
)
|
||||
del y_mask
|
||||
|
||||
for i, block in enumerate(self.blocks):
|
||||
x, y_feat = block(
|
||||
x,
|
||||
c,
|
||||
y_feat,
|
||||
rope_cos=rope_cos,
|
||||
rope_sin=rope_sin,
|
||||
crop_y=num_tokens,
|
||||
) # (B, M, D), (B, L, D)
|
||||
del y_feat # Final layers don't use dense text features.
|
||||
|
||||
x = self.final_layer(x, c) # (B, M, patch_size ** 2 * out_channels)
|
||||
x = rearrange(
|
||||
x,
|
||||
"B (T hp wp) (p1 p2 c) -> B c T (hp p1) (wp p2)",
|
||||
T=T,
|
||||
hp=H // self.patch_size,
|
||||
wp=W // self.patch_size,
|
||||
p1=self.patch_size,
|
||||
p2=self.patch_size,
|
||||
c=self.out_channels,
|
||||
)
|
||||
|
||||
return -x
|
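The block structure above leans on the two helpers defined at the top of the file: a pre-norm modulated by the conditioning vector, and a tanh-gated residual ("sandwich" norm). A self-contained restatement, with a plain Linear standing in for the attention/MLP branch:

import torch

def rms_norm(x, eps=1e-6):
    return x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + eps)

def modulated_rmsnorm(x, scale, eps=1e-6):
    return rms_norm(x, eps) * (1 + scale.unsqueeze(1))               # pre-norm scaled by conditioning

def residual_tanh_gated_rmsnorm(x, x_res, gate, eps=1e-6):
    return x + torch.tanh(gate).unsqueeze(1) * rms_norm(x_res, eps)  # gated residual connection

B, N, D = 2, 16, 64
x = torch.randn(B, N, D)
scale, gate = torch.randn(B, D), torch.randn(B, D)
branch = torch.nn.Linear(D, D)                                       # stand-in for an attention or MLP branch
x = residual_tanh_gated_rmsnorm(x, branch(modulated_rmsnorm(x, scale)), gate)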
comfy/ldm/genmo/joint_model/layers.py (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
#original code from https://github.com/genmoai/models under apache 2.0 license
|
||||
#adapted to ComfyUI
|
||||
|
||||
import collections.abc
|
||||
import math
|
||||
from itertools import repeat
|
||||
from typing import Callable, Optional
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from einops import rearrange
|
||||
import comfy.ldm.common_dit
|
||||
|
||||
|
||||
# From PyTorch internals
|
||||
def _ntuple(n):
|
||||
def parse(x):
|
||||
if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
|
||||
return tuple(x)
|
||||
return tuple(repeat(x, n))
|
||||
|
||||
return parse
|
||||
|
||||
|
||||
to_2tuple = _ntuple(2)
|
||||
|
||||
|
||||
class TimestepEmbedder(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
hidden_size: int,
|
||||
frequency_embedding_size: int = 256,
|
||||
*,
|
||||
bias: bool = True,
|
||||
timestep_scale: Optional[float] = None,
|
||||
dtype=None,
|
||||
device=None,
|
||||
operations=None,
|
||||
):
|
||||
super().__init__()
|
||||
self.mlp = nn.Sequential(
|
||||
operations.Linear(frequency_embedding_size, hidden_size, bias=bias, dtype=dtype, device=device),
|
||||
nn.SiLU(),
|
||||
operations.Linear(hidden_size, hidden_size, bias=bias, dtype=dtype, device=device),
|
||||
)
|
||||
self.frequency_embedding_size = frequency_embedding_size
|
||||
self.timestep_scale = timestep_scale
|
||||
|
||||
@staticmethod
|
||||
def timestep_embedding(t, dim, max_period=10000):
|
||||
half = dim // 2
|
||||
freqs = torch.arange(start=0, end=half, dtype=torch.float32, device=t.device)
|
||||
freqs.mul_(-math.log(max_period) / half).exp_()
|
||||
args = t[:, None].float() * freqs[None]
|
||||
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
|
||||
if dim % 2:
|
||||
embedding = torch.cat(
|
||||
[embedding, torch.zeros_like(embedding[:, :1])], dim=-1
|
||||
)
|
||||
return embedding
|
||||
|
||||
def forward(self, t, out_dtype):
|
||||
if self.timestep_scale is not None:
|
||||
t = t * self.timestep_scale
|
||||
t_freq = self.timestep_embedding(t, self.frequency_embedding_size).to(dtype=out_dtype)
|
||||
t_emb = self.mlp(t_freq)
|
||||
return t_emb
|
||||
|
||||
|
||||
class FeedForward(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
in_features: int,
|
||||
hidden_size: int,
|
||||
multiple_of: int,
|
||||
ffn_dim_multiplier: Optional[float],
|
||||
device: Optional[torch.device] = None,
|
||||
dtype=None,
|
||||
operations=None,
|
||||
):
|
||||
super().__init__()
|
||||
# keep parameter count and computation constant compared to standard FFN
|
||||
hidden_size = int(2 * hidden_size / 3)
|
||||
# custom dim factor multiplier
|
||||
if ffn_dim_multiplier is not None:
|
||||
hidden_size = int(ffn_dim_multiplier * hidden_size)
|
||||
hidden_size = multiple_of * ((hidden_size + multiple_of - 1) // multiple_of)
|
||||
|
||||
self.hidden_dim = hidden_size
|
||||
self.w1 = operations.Linear(in_features, 2 * hidden_size, bias=False, device=device, dtype=dtype)
|
||||
self.w2 = operations.Linear(hidden_size, in_features, bias=False, device=device, dtype=dtype)
|
||||
|
||||
def forward(self, x):
|
||||
x, gate = self.w1(x).chunk(2, dim=-1)
|
||||
x = self.w2(F.silu(x) * gate)
|
||||
return x
|
||||
|
||||
|
||||
class PatchEmbed(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
patch_size: int = 16,
|
||||
in_chans: int = 3,
|
||||
embed_dim: int = 768,
|
||||
norm_layer: Optional[Callable] = None,
|
||||
flatten: bool = True,
|
||||
bias: bool = True,
|
||||
dynamic_img_pad: bool = False,
|
||||
dtype=None,
|
||||
device=None,
|
||||
operations=None,
|
||||
):
|
||||
super().__init__()
|
||||
self.patch_size = to_2tuple(patch_size)
|
||||
self.flatten = flatten
|
||||
self.dynamic_img_pad = dynamic_img_pad
|
||||
|
||||
self.proj = operations.Conv2d(
|
||||
in_chans,
|
||||
embed_dim,
|
||||
kernel_size=patch_size,
|
||||
stride=patch_size,
|
||||
bias=bias,
|
||||
device=device,
|
||||
dtype=dtype,
|
||||
)
|
||||
assert norm_layer is None
|
||||
self.norm = (
|
||||
norm_layer(embed_dim, device=device) if norm_layer else nn.Identity()
|
||||
)
|
||||
|
||||
def forward(self, x):
|
||||
B, _C, T, H, W = x.shape
|
||||
if not self.dynamic_img_pad:
|
||||
assert H % self.patch_size[0] == 0, f"Input height ({H}) should be divisible by patch size ({self.patch_size[0]})."
|
||||
assert W % self.patch_size[1] == 0, f"Input width ({W}) should be divisible by patch size ({self.patch_size[1]})."
|
||||
else:
|
||||
pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0]
|
||||
pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1]
|
||||
x = F.pad(x, (0, pad_w, 0, pad_h))
|
||||
|
||||
x = rearrange(x, "B C T H W -> (B T) C H W", B=B, T=T)
|
||||
x = comfy.ldm.common_dit.pad_to_patch_size(x, self.patch_size, padding_mode='circular')
|
||||
x = self.proj(x)
|
||||
|
||||
# Flatten temporal and spatial dimensions.
|
||||
if not self.flatten:
|
||||
raise NotImplementedError("Must flatten output.")
|
||||
x = rearrange(x, "(B T) C H W -> B (T H W) C", B=B, T=T)
|
||||
|
||||
x = self.norm(x)
|
||||
return x
|
||||
|
||||
|
||||
class RMSNorm(torch.nn.Module):
|
||||
def __init__(self, hidden_size, eps=1e-5, device=None, dtype=None):
|
||||
super().__init__()
|
||||
self.eps = eps
|
||||
self.weight = torch.nn.Parameter(torch.empty(hidden_size, device=device, dtype=dtype))
|
||||
self.register_parameter("bias", None)
|
||||
|
||||
def forward(self, x):
|
||||
return comfy.ldm.common_dit.rms_norm(x, self.weight, self.eps)
|
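FeedForward above is a SwiGLU-style gated MLP: w1 emits both the hidden activation and a gate in a single projection, and w2 maps back to the input width. A minimal functional restatement:

import torch
import torch.nn.functional as F

def swiglu_ffn(x, w1: torch.nn.Linear, w2: torch.nn.Linear):
    h, gate = w1(x).chunk(2, dim=-1)   # w1: in_features -> 2 * hidden_size
    return w2(F.silu(h) * gate)        # w2: hidden_size -> in_features

in_features, hidden_size = 64, 128
w1 = torch.nn.Linear(in_features, 2 * hidden_size, bias=False)
w2 = torch.nn.Linear(hidden_size, in_features, bias=False)
out = swiglu_ffn(torch.randn(2, 10, in_features), w1, w2)  # shape (2, 10, 64)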
comfy/ldm/genmo/joint_model/rope_mixed.py (new file, 88 lines)
@@ -0,0 +1,88 @@
#original code from https://github.com/genmoai/models under apache 2.0 license

# import functools
import math

import torch


def centers(start: float, stop, num, dtype=None, device=None):
    """linspace through bin centers.

    Args:
        start (float): Start of the range.
        stop (float): End of the range.
        num (int): Number of points.
        dtype (torch.dtype): Data type of the points.
        device (torch.device): Device of the points.

    Returns:
        centers (Tensor): Centers of the bins. Shape: (num,).
    """
    edges = torch.linspace(start, stop, num + 1, dtype=dtype, device=device)
    return (edges[:-1] + edges[1:]) / 2


# @functools.lru_cache(maxsize=1)
def create_position_matrix(
    T: int,
    pH: int,
    pW: int,
    device: torch.device,
    dtype: torch.dtype,
    *,
    target_area: float = 36864,
):
    """
    Args:
        T: int - Temporal dimension
        pH: int - Height dimension after patchify
        pW: int - Width dimension after patchify

    Returns:
        pos: [T * pH * pW, 3] - position matrix
    """
    # Create 1D tensors for each dimension
    t = torch.arange(T, dtype=dtype)

    # Positionally interpolate to area 36864.
    # (3072x3072 frame with 16x16 patches = 192x192 latents).
    # This automatically scales rope positions when the resolution changes.
    # We use a large target area so the model is more sensitive
    # to changes in the learned pos_frequencies matrix.
    scale = math.sqrt(target_area / (pW * pH))
    w = centers(-pW * scale / 2, pW * scale / 2, pW)
    h = centers(-pH * scale / 2, pH * scale / 2, pH)

    # Use meshgrid to create 3D grids
    grid_t, grid_h, grid_w = torch.meshgrid(t, h, w, indexing="ij")

    # Stack and reshape the grids.
    pos = torch.stack([grid_t, grid_h, grid_w], dim=-1)  # [T, pH, pW, 3]
    pos = pos.view(-1, 3)  # [T * pH * pW, 3]
    pos = pos.to(dtype=dtype, device=device)

    return pos


def compute_mixed_rotation(
    freqs: torch.Tensor,
    pos: torch.Tensor,
):
    """
    Project each 3-dim position into per-head, per-head-dim 1D frequencies.

    Args:
        freqs: [3, num_heads, num_freqs] - learned rotation frequency (for t, row, col) for each head position
        pos: [N, 3] - position of each token
        num_heads: int

    Returns:
        freqs_cos: [N, num_heads, num_freqs] - cosine components
        freqs_sin: [N, num_heads, num_freqs] - sine components
    """
    assert freqs.ndim == 3
    freqs_sum = torch.einsum("Nd,dhf->Nhf", pos.to(freqs), freqs)
    freqs_cos = torch.cos(freqs_sum)
    freqs_sin = torch.sin(freqs_sum)
    return freqs_cos, freqs_sin
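compute_mixed_rotation folds the 3-D (t, row, col) position of every token into per-head rotation angles with a single einsum; written out,

$\theta_{n,h,f} = \sum_{d \in \{t,\,\mathrm{row},\,\mathrm{col}\}} \mathrm{pos}_{n,d}\,\mathrm{freqs}_{d,h,f}$

and the function returns $\cos\theta$ and $\sin\theta$, each of shape [N, num_heads, num_freqs]. Unlike standard 1-D RoPE with a fixed geometric frequency schedule, every head here learns its own mixture of temporal and spatial coordinates through the pos_frequencies parameter.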
comfy/ldm/genmo/joint_model/temporal_rope.py (new file, 34 lines)
@@ -0,0 +1,34 @@
#original code from https://github.com/genmoai/models under apache 2.0 license

# Based on Llama3 Implementation.
import torch


def apply_rotary_emb_qk_real(
    xqk: torch.Tensor,
    freqs_cos: torch.Tensor,
    freqs_sin: torch.Tensor,
) -> torch.Tensor:
    """
    Apply rotary embeddings to input tensors using the given frequency tensor without complex numbers.

    Args:
        xqk (torch.Tensor): Query and/or Key tensors to apply rotary embeddings. Shape: (B, S, *, num_heads, D)
                            Can be either just query or just key, or both stacked along some batch or * dim.
        freqs_cos (torch.Tensor): Precomputed cosine frequency tensor.
        freqs_sin (torch.Tensor): Precomputed sine frequency tensor.

    Returns:
        torch.Tensor: The input tensor with rotary embeddings applied.
    """
    # Split the last dimension into even and odd parts
    xqk_even = xqk[..., 0::2]
    xqk_odd = xqk[..., 1::2]

    # Apply rotation
    cos_part = (xqk_even * freqs_cos - xqk_odd * freqs_sin).type_as(xqk)
    sin_part = (xqk_even * freqs_sin + xqk_odd * freqs_cos).type_as(xqk)

    # Interleave the results back into the original shape
    out = torch.stack([cos_part, sin_part], dim=-1).flatten(-2)
    return out
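A quick self-contained check (illustrative names only) that the even/odd formulation above matches the usual complex-number statement of RoPE, (x_even + i * x_odd) * (cos + i * sin):

import torch

def rotate_pairs(x, cos, sin):
    xe, xo = x[..., 0::2], x[..., 1::2]
    return torch.stack([xe * cos - xo * sin, xe * sin + xo * cos], dim=-1).flatten(-2)

x = torch.randn(1, 6, 2, 8)    # (batch, positions, heads, head_dim)
theta = torch.randn(6, 2, 4)   # one angle per (position, head, pair)
out = rotate_pairs(x, torch.cos(theta), torch.sin(theta))
ref = torch.view_as_complex(x.unflatten(-1, (-1, 2))) * torch.polar(torch.ones_like(theta), theta)
assert torch.allclose(out, torch.view_as_real(ref).flatten(-2), atol=1e-5)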
comfy/ldm/genmo/joint_model/utils.py (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
#original code from https://github.com/genmoai/models under apache 2.0 license
|
||||
#adapted to ComfyUI
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
|
||||
def modulate(x, shift, scale):
|
||||
return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
|
||||
|
||||
|
||||
def pool_tokens(x: torch.Tensor, mask: torch.Tensor, *, keepdim=False) -> torch.Tensor:
|
||||
"""
|
||||
Pool tokens in x using mask.
|
||||
|
||||
NOTE: We assume x does not require gradients.
|
||||
|
||||
Args:
|
||||
x: (B, L, D) tensor of tokens.
|
||||
mask: (B, L) boolean tensor indicating which tokens are not padding.
|
||||
|
||||
Returns:
|
||||
pooled: (B, D) tensor of pooled tokens.
|
||||
"""
|
||||
assert x.size(1) == mask.size(1) # Expected mask to have same length as tokens.
|
||||
assert x.size(0) == mask.size(0) # Expected mask to have same batch size as tokens.
|
||||
mask = mask[:, :, None].to(dtype=x.dtype)
|
||||
mask = mask / mask.sum(dim=1, keepdim=True).clamp(min=1)
|
||||
pooled = (x * mask).sum(dim=1, keepdim=keepdim)
|
||||
return pooled
|
||||
|
||||
|
||||
class AttentionPool(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
embed_dim: int,
|
||||
num_heads: int,
|
||||
output_dim: int = None,
|
||||
device: Optional[torch.device] = None,
|
||||
dtype=None,
|
||||
operations=None,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
spatial_dim (int): Number of tokens in sequence length.
|
||||
embed_dim (int): Dimensionality of input tokens.
|
||||
num_heads (int): Number of attention heads.
|
||||
output_dim (int): Dimensionality of output tokens. Defaults to embed_dim.
|
||||
"""
|
||||
super().__init__()
|
||||
self.num_heads = num_heads
|
||||
self.to_kv = operations.Linear(embed_dim, 2 * embed_dim, device=device, dtype=dtype)
|
||||
self.to_q = operations.Linear(embed_dim, embed_dim, device=device, dtype=dtype)
|
||||
self.to_out = operations.Linear(embed_dim, output_dim or embed_dim, device=device, dtype=dtype)
|
||||
|
||||
def forward(self, x, mask):
|
||||
"""
|
||||
Args:
|
||||
x (torch.Tensor): (B, L, D) tensor of input tokens.
|
||||
mask (torch.Tensor): (B, L) boolean tensor indicating which tokens are not padding.
|
||||
|
||||
NOTE: We assume x does not require gradients.
|
||||
|
||||
Returns:
|
||||
x (torch.Tensor): (B, D) tensor of pooled tokens.
|
||||
"""
|
||||
D = x.size(2)
|
||||
|
||||
# Construct attention mask, shape: (B, 1, num_queries=1, num_keys=1+L).
|
||||
attn_mask = mask[:, None, None, :].bool() # (B, 1, 1, L).
|
||||
attn_mask = F.pad(attn_mask, (1, 0), value=True) # (B, 1, 1, 1+L).
|
||||
|
||||
# Average non-padding token features. These will be used as the query.
|
||||
x_pool = pool_tokens(x, mask, keepdim=True) # (B, 1, D)
|
||||
|
||||
# Concat pooled features to input sequence.
|
||||
x = torch.cat([x_pool, x], dim=1) # (B, L+1, D)
|
||||
|
||||
# Compute queries, keys, values. Only the mean token is used to create a query.
|
||||
kv = self.to_kv(x) # (B, L+1, 2 * D)
|
||||
q = self.to_q(x[:, 0]) # (B, D)
|
||||
|
||||
# Extract heads.
|
||||
head_dim = D // self.num_heads
|
||||
kv = kv.unflatten(2, (2, self.num_heads, head_dim)) # (B, 1+L, 2, H, head_dim)
|
||||
kv = kv.transpose(1, 3) # (B, H, 2, 1+L, head_dim)
|
||||
k, v = kv.unbind(2) # (B, H, 1+L, head_dim)
|
||||
q = q.unflatten(1, (self.num_heads, head_dim)) # (B, H, head_dim)
|
||||
q = q.unsqueeze(2) # (B, H, 1, head_dim)
|
||||
|
||||
# Compute attention.
|
||||
x = F.scaled_dot_product_attention(
|
||||
q, k, v, attn_mask=attn_mask, dropout_p=0.0
|
||||
) # (B, H, 1, head_dim)
|
||||
|
||||
# Concatenate heads and run output.
|
||||
x = x.squeeze(2).flatten(1, 2) # (B, D = H * head_dim)
|
||||
x = self.to_out(x)
|
||||
return x
|
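pool_tokens is a masked mean over the sequence dimension; it supplies the single query token that AttentionPool prepends before attending over the full sequence. A compact restatement with a sanity check:

import torch

def masked_mean(x, mask):
    m = mask[:, :, None].to(x.dtype)
    m = m / m.sum(dim=1, keepdim=True).clamp(min=1)
    return (x * m).sum(dim=1)   # (B, L, D) and (B, L) -> (B, D)

x = torch.randn(2, 5, 4)
mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]], dtype=torch.bool)
assert torch.allclose(masked_mean(x, mask)[0], x[0, :3].mean(dim=0), atol=1e-6)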
comfy/ldm/genmo/vae/model.py (new file, 480 lines)
@@ -0,0 +1,480 @@
|
||||
#original code from https://github.com/genmoai/models under apache 2.0 license
|
||||
#adapted to ComfyUI
|
||||
|
||||
from typing import Callable, List, Optional, Tuple, Union
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from einops import rearrange
|
||||
|
||||
import comfy.ops
|
||||
ops = comfy.ops.disable_weight_init
|
||||
|
||||
# import mochi_preview.dit.joint_model.context_parallel as cp
|
||||
# from mochi_preview.vae.cp_conv import cp_pass_frames, gather_all_frames
|
||||
|
||||
|
||||
def cast_tuple(t, length=1):
|
||||
return t if isinstance(t, tuple) else ((t,) * length)
|
||||
|
||||
|
||||
class GroupNormSpatial(ops.GroupNorm):
|
||||
"""
|
||||
GroupNorm applied per-frame.
|
||||
"""
|
||||
|
||||
def forward(self, x: torch.Tensor, *, chunk_size: int = 8):
|
||||
B, C, T, H, W = x.shape
|
||||
x = rearrange(x, "B C T H W -> (B T) C H W")
|
||||
# Run group norm in chunks.
|
||||
output = torch.empty_like(x)
|
||||
for b in range(0, B * T, chunk_size):
|
||||
output[b : b + chunk_size] = super().forward(x[b : b + chunk_size])
|
||||
return rearrange(output, "(B T) C H W -> B C T H W", B=B, T=T)
|
||||
|
||||
class PConv3d(ops.Conv3d):
|
||||
def __init__(
|
||||
self,
|
||||
in_channels,
|
||||
out_channels,
|
||||
kernel_size: Union[int, Tuple[int, int, int]],
|
||||
stride: Union[int, Tuple[int, int, int]],
|
||||
causal: bool = True,
|
||||
context_parallel: bool = True,
|
||||
**kwargs,
|
||||
):
|
||||
self.causal = causal
|
||||
self.context_parallel = context_parallel
|
||||
kernel_size = cast_tuple(kernel_size, 3)
|
||||
stride = cast_tuple(stride, 3)
|
||||
height_pad = (kernel_size[1] - 1) // 2
|
||||
width_pad = (kernel_size[2] - 1) // 2
|
||||
|
||||
super().__init__(
|
||||
in_channels=in_channels,
|
||||
out_channels=out_channels,
|
||||
kernel_size=kernel_size,
|
||||
stride=stride,
|
||||
dilation=(1, 1, 1),
|
||||
padding=(0, height_pad, width_pad),
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
def forward(self, x: torch.Tensor):
|
||||
# Compute padding amounts.
|
||||
context_size = self.kernel_size[0] - 1
|
||||
if self.causal:
|
||||
pad_front = context_size
|
||||
pad_back = 0
|
||||
else:
|
||||
pad_front = context_size // 2
|
||||
pad_back = context_size - pad_front
|
||||
|
||||
# Apply padding.
|
||||
assert self.padding_mode == "replicate" # DEBUG
|
||||
mode = "constant" if self.padding_mode == "zeros" else self.padding_mode
|
||||
x = F.pad(x, (0, 0, 0, 0, pad_front, pad_back), mode=mode)
|
||||
return super().forward(x)
|
||||
|
||||
|
||||
class Conv1x1(ops.Linear):
|
||||
"""*1x1 Conv implemented with a linear layer."""
|
||||
|
||||
def __init__(self, in_features: int, out_features: int, *args, **kwargs):
|
||||
super().__init__(in_features, out_features, *args, **kwargs)
|
||||
|
||||
def forward(self, x: torch.Tensor):
|
||||
"""Forward pass.
|
||||
|
||||
Args:
|
||||
x: Input tensor. Shape: [B, C, *] or [B, *, C].
|
||||
|
||||
Returns:
|
||||
x: Output tensor. Shape: [B, C', *] or [B, *, C'].
|
||||
"""
|
||||
x = x.movedim(1, -1)
|
||||
x = super().forward(x)
|
||||
x = x.movedim(-1, 1)
|
||||
return x
|
||||
|
||||
|
||||
class DepthToSpaceTime(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
temporal_expansion: int,
|
||||
spatial_expansion: int,
|
||||
):
|
||||
super().__init__()
|
||||
self.temporal_expansion = temporal_expansion
|
||||
self.spatial_expansion = spatial_expansion
|
||||
|
||||
# When printed, this module should show the temporal and spatial expansion factors.
|
||||
def extra_repr(self):
|
||||
return f"texp={self.temporal_expansion}, sexp={self.spatial_expansion}"
|
||||
|
||||
def forward(self, x: torch.Tensor):
|
||||
"""Forward pass.
|
||||
|
||||
Args:
|
||||
x: Input tensor. Shape: [B, C, T, H, W].
|
||||
|
||||
Returns:
|
||||
x: Rearranged tensor. Shape: [B, C/(st*s*s), T*st, H*s, W*s].
|
||||
"""
|
||||
x = rearrange(
|
||||
x,
|
||||
"B (C st sh sw) T H W -> B C (T st) (H sh) (W sw)",
|
||||
st=self.temporal_expansion,
|
||||
sh=self.spatial_expansion,
|
||||
sw=self.spatial_expansion,
|
||||
)
|
||||
|
||||
# cp_rank, _ = cp.get_cp_rank_size()
|
||||
if self.temporal_expansion > 1: # and cp_rank == 0:
|
||||
# Drop the first self.temporal_expansion - 1 frames.
|
||||
# This is because we always want the 3x3x3 conv filter to only apply
|
||||
# to the first frame, and the first frame doesn't need to be repeated.
|
||||
assert all(x.shape)
|
||||
x = x[:, :, self.temporal_expansion - 1 :]
|
||||
assert all(x.shape)
|
||||
|
||||
return x
|
||||
|
||||
|
||||
def norm_fn(
|
||||
in_channels: int,
|
||||
affine: bool = True,
|
||||
):
|
||||
return GroupNormSpatial(affine=affine, num_groups=32, num_channels=in_channels)
|
||||
|
||||
|
||||
class ResBlock(nn.Module):
|
||||
"""Residual block that preserves the spatial dimensions."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
channels: int,
|
||||
*,
|
||||
affine: bool = True,
|
||||
attn_block: Optional[nn.Module] = None,
|
||||
padding_mode: str = "replicate",
|
||||
causal: bool = True,
|
||||
):
|
||||
super().__init__()
|
||||
self.channels = channels
|
||||
|
||||
assert causal
|
||||
self.stack = nn.Sequential(
|
||||
norm_fn(channels, affine=affine),
|
||||
nn.SiLU(inplace=True),
|
||||
PConv3d(
|
||||
in_channels=channels,
|
||||
out_channels=channels,
|
||||
kernel_size=(3, 3, 3),
|
||||
stride=(1, 1, 1),
|
||||
padding_mode=padding_mode,
|
||||
bias=True,
|
||||
# causal=causal,
|
||||
),
|
||||
norm_fn(channels, affine=affine),
|
||||
nn.SiLU(inplace=True),
|
||||
PConv3d(
|
||||
in_channels=channels,
|
||||
out_channels=channels,
|
||||
kernel_size=(3, 3, 3),
|
||||
stride=(1, 1, 1),
|
||||
padding_mode=padding_mode,
|
||||
bias=True,
|
||||
# causal=causal,
|
||||
),
|
||||
)
|
||||
|
||||
self.attn_block = attn_block if attn_block else nn.Identity()
|
||||
|
||||
def forward(self, x: torch.Tensor):
|
||||
"""Forward pass.
|
||||
|
||||
Args:
|
||||
x: Input tensor. Shape: [B, C, T, H, W].
|
||||
"""
|
||||
residual = x
|
||||
x = self.stack(x)
|
||||
x = x + residual
|
||||
del residual
|
||||
|
||||
return self.attn_block(x)
|
||||
|
||||
|
||||
class CausalUpsampleBlock(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
in_channels: int,
|
||||
out_channels: int,
|
||||
num_res_blocks: int,
|
||||
*,
|
||||
temporal_expansion: int = 2,
|
||||
spatial_expansion: int = 2,
|
||||
**block_kwargs,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
blocks = []
|
||||
for _ in range(num_res_blocks):
|
||||
blocks.append(block_fn(in_channels, **block_kwargs))
|
||||
self.blocks = nn.Sequential(*blocks)
|
||||
|
||||
self.temporal_expansion = temporal_expansion
|
||||
self.spatial_expansion = spatial_expansion
|
||||
|
||||
# Change channels in the final convolution layer.
|
||||
self.proj = Conv1x1(
|
||||
in_channels,
|
||||
out_channels * temporal_expansion * (spatial_expansion**2),
|
||||
)
|
||||
|
||||
self.d2st = DepthToSpaceTime(
|
||||
temporal_expansion=temporal_expansion, spatial_expansion=spatial_expansion
|
||||
)
|
||||
|
||||
def forward(self, x):
|
||||
x = self.blocks(x)
|
||||
x = self.proj(x)
|
||||
x = self.d2st(x)
|
||||
return x
|
||||
|
||||
|
||||
def block_fn(channels, *, has_attention: bool = False, **block_kwargs):
|
||||
assert has_attention is False #NOTE: if this is ever true add back the attention code.
|
||||
|
||||
attn_block = None #AttentionBlock(channels) if has_attention else None
|
||||
|
||||
return ResBlock(
|
||||
channels, affine=True, attn_block=attn_block, **block_kwargs
|
||||
)
|
||||
|
||||
|
||||
class DownsampleBlock(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
in_channels: int,
|
||||
out_channels: int,
|
||||
num_res_blocks,
|
||||
*,
|
||||
temporal_reduction=2,
|
||||
spatial_reduction=2,
|
||||
**block_kwargs,
|
||||
):
|
||||
"""
|
||||
Downsample block for the VAE encoder.
|
||||
|
||||
Args:
|
||||
in_channels: Number of input channels.
|
||||
out_channels: Number of output channels.
|
||||
num_res_blocks: Number of residual blocks.
|
||||
temporal_reduction: Temporal reduction factor.
|
||||
spatial_reduction: Spatial reduction factor.
|
||||
"""
|
||||
super().__init__()
|
||||
layers = []
|
||||
|
||||
# Change the channel count in the strided convolution.
|
||||
# This lets the ResBlock have uniform channel count,
|
||||
# as in ConvNeXt.
|
||||
assert in_channels != out_channels
|
||||
layers.append(
|
||||
PConv3d(
|
||||
in_channels=in_channels,
|
||||
out_channels=out_channels,
|
||||
kernel_size=(temporal_reduction, spatial_reduction, spatial_reduction),
|
||||
stride=(temporal_reduction, spatial_reduction, spatial_reduction),
|
||||
padding_mode="replicate",
|
||||
bias=True,
|
||||
)
|
||||
)
|
||||
|
||||
for _ in range(num_res_blocks):
|
||||
layers.append(block_fn(out_channels, **block_kwargs))
|
||||
|
||||
self.layers = nn.Sequential(*layers)
|
||||
|
||||
def forward(self, x):
|
||||
return self.layers(x)
|
||||
|
||||
|
||||
def add_fourier_features(inputs: torch.Tensor, start=6, stop=8, step=1):
|
||||
num_freqs = (stop - start) // step
|
||||
assert inputs.ndim == 5
|
||||
C = inputs.size(1)
|
||||
|
||||
# Create Base 2 Fourier features.
|
||||
freqs = torch.arange(start, stop, step, dtype=inputs.dtype, device=inputs.device)
|
||||
assert num_freqs == len(freqs)
|
||||
w = torch.pow(2.0, freqs) * (2 * torch.pi) # [num_freqs]
|
||||
C = inputs.shape[1]
|
||||
w = w.repeat(C)[None, :, None, None, None] # [1, C * num_freqs, 1, 1, 1]
|
||||
|
||||
# Interleaved repeat of input channels to match w.
|
||||
h = inputs.repeat_interleave(num_freqs, dim=1) # [B, C * num_freqs, T, H, W]
|
||||
# Scale channels by frequency.
|
||||
h = w * h
|
||||
|
||||
return torch.cat(
|
||||
[
|
||||
inputs,
|
||||
torch.sin(h),
|
||||
torch.cos(h),
|
||||
],
|
||||
dim=1,
|
||||
)
|
||||
|
||||
|
||||
class FourierFeatures(nn.Module):
|
||||
def __init__(self, start: int = 6, stop: int = 8, step: int = 1):
|
||||
super().__init__()
|
||||
self.start = start
|
||||
self.stop = stop
|
||||
self.step = step
|
||||
|
||||
def forward(self, inputs):
|
||||
"""Add Fourier features to inputs.
|
||||
|
||||
Args:
|
||||
inputs: Input tensor. Shape: [B, C, T, H, W]
|
||||
|
||||
Returns:
|
||||
h: Output tensor. Shape: [B, (1 + 2 * num_freqs) * C, T, H, W]
|
||||
"""
|
||||
return add_fourier_features(inputs, self.start, self.stop, self.step)

class Decoder(nn.Module):
    def __init__(
        self,
        *,
        out_channels: int = 3,
        latent_dim: int,
        base_channels: int,
        channel_multipliers: List[int],
        num_res_blocks: List[int],
        temporal_expansions: Optional[List[int]] = None,
        spatial_expansions: Optional[List[int]] = None,
        has_attention: List[bool],
        output_norm: bool = True,
        nonlinearity: str = "silu",
        output_nonlinearity: str = "silu",
        causal: bool = True,
        **block_kwargs,
    ):
        super().__init__()
        self.input_channels = latent_dim
        self.base_channels = base_channels
        self.channel_multipliers = channel_multipliers
        self.num_res_blocks = num_res_blocks
        self.output_nonlinearity = output_nonlinearity
        assert nonlinearity == "silu"
        assert causal

        ch = [mult * base_channels for mult in channel_multipliers]
        self.num_up_blocks = len(ch) - 1
        assert len(num_res_blocks) == self.num_up_blocks + 2

        blocks = []

        first_block = [
            nn.Conv3d(latent_dim, ch[-1], kernel_size=(1, 1, 1))
        ] # Input layer.
        # First set of blocks preserve channel count.
        for _ in range(num_res_blocks[-1]):
            first_block.append(
                block_fn(
                    ch[-1],
                    has_attention=has_attention[-1],
                    causal=causal,
                    **block_kwargs,
                )
            )
        blocks.append(nn.Sequential(*first_block))

        assert len(temporal_expansions) == len(spatial_expansions) == self.num_up_blocks
        assert len(num_res_blocks) == len(has_attention) == self.num_up_blocks + 2

        upsample_block_fn = CausalUpsampleBlock

        for i in range(self.num_up_blocks):
            block = upsample_block_fn(
                ch[-i - 1],
                ch[-i - 2],
                num_res_blocks=num_res_blocks[-i - 2],
                has_attention=has_attention[-i - 2],
                temporal_expansion=temporal_expansions[-i - 1],
                spatial_expansion=spatial_expansions[-i - 1],
                causal=causal,
                **block_kwargs,
            )
            blocks.append(block)

        assert not output_norm

        # Last block. Preserve channel count.
        last_block = []
        for _ in range(num_res_blocks[0]):
            last_block.append(
                block_fn(
                    ch[0], has_attention=has_attention[0], causal=causal, **block_kwargs
                )
            )
        blocks.append(nn.Sequential(*last_block))

        self.blocks = nn.ModuleList(blocks)
        self.output_proj = Conv1x1(ch[0], out_channels)

    def forward(self, x):
        """Forward pass.

        Args:
            x: Latent tensor. Shape: [B, input_channels, t, h, w]. Scaled [-1, 1].

        Returns:
            x: Reconstructed video tensor. Shape: [B, C, T, H, W]. Scaled to [-1, 1].
               T + 1 = (t - 1) * 4.
               H = h * 16, W = w * 16.
        """
        for block in self.blocks:
            x = block(x)

        if self.output_nonlinearity == "silu":
            x = F.silu(x, inplace=not self.training)
        else:
            assert (
                not self.output_nonlinearity
            ) # StyleGAN3 omits the to-RGB nonlinearity.

        return self.output_proj(x).contiguous()

class VideoVAE(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = None #TODO once the model releases
        self.decoder = Decoder(
            out_channels=3,
            base_channels=128,
            channel_multipliers=[1, 2, 4, 6],
            temporal_expansions=[1, 2, 3],
            spatial_expansions=[2, 2, 2],
            num_res_blocks=[3, 3, 4, 6, 3],
            latent_dim=12,
            has_attention=[False, False, False, False, False],
            padding_mode="replicate",
            output_norm=False,
            nonlinearity="silu",
            output_nonlinearity="silu",
            causal=True,
        )

    def encode(self, x):
        return self.encoder(x)

    def decode(self, x):
        return self.decoder(x)
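For orientation, a rough shape sketch for this decoder configuration. The latent size below is made up; the spatial factor is the product of spatial_expansions (2 * 2 * 2 = 8), and the max(0, t * 6 - 5) temporal form matches the upscale_ratio the VAE wrapper in comfy/sd.py uses later in this diff:

latent_shape = (1, 12, 5, 60, 106)  # [B, latent_dim, t, h, w], fabricated latent size
b, c, t, h, w = latent_shape
video_shape = (b, 3, max(0, t * 6 - 5), h * 8, w * 8)
print(video_shape)  # (1, 3, 25, 480, 848)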
|
@ -1,11 +1,11 @@
|
||||
import logging
|
||||
import math
|
||||
from typing import Dict, Optional
|
||||
from typing import Dict, Optional, List
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from .. import attention
|
||||
from ..attention import optimized_attention
|
||||
from einops import rearrange, repeat
|
||||
from .util import timestep_embedding
|
||||
import comfy.ops
|
||||
@ -97,7 +97,7 @@ class PatchEmbed(nn.Module):
|
||||
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
|
||||
|
||||
def forward(self, x):
|
||||
B, C, H, W = x.shape
|
||||
# B, C, H, W = x.shape
|
||||
# if self.img_size is not None:
|
||||
# if self.strict_img_size:
|
||||
# _assert(H == self.img_size[0], f"Input height ({H}) doesn't match model ({self.img_size[0]}).")
|
||||
@ -266,8 +266,6 @@ def split_qkv(qkv, head_dim):
|
||||
qkv = qkv.reshape(qkv.shape[0], qkv.shape[1], 3, -1, head_dim).movedim(2, 0)
|
||||
return qkv[0], qkv[1], qkv[2]
|
||||
|
||||
def optimized_attention(qkv, num_heads):
|
||||
return attention.optimized_attention(qkv[0], qkv[1], qkv[2], num_heads)
|
||||
|
||||
class SelfAttention(nn.Module):
|
||||
ATTENTION_MODES = ("xformers", "torch", "torch-hb", "math", "debug")
|
||||
@ -326,9 +324,9 @@ class SelfAttention(nn.Module):
|
||||
return x
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
qkv = self.pre_attention(x)
|
||||
q, k, v = self.pre_attention(x)
|
||||
x = optimized_attention(
|
||||
qkv, num_heads=self.num_heads
|
||||
q, k, v, heads=self.num_heads
|
||||
)
|
||||
x = self.post_attention(x)
|
||||
return x
|
||||
@ -417,6 +415,7 @@ class DismantledBlock(nn.Module):
|
||||
scale_mod_only: bool = False,
|
||||
swiglu: bool = False,
|
||||
qk_norm: Optional[str] = None,
|
||||
x_block_self_attn: bool = False,
|
||||
dtype=None,
|
||||
device=None,
|
||||
operations=None,
|
||||
@ -440,6 +439,24 @@ class DismantledBlock(nn.Module):
|
||||
device=device,
|
||||
operations=operations
|
||||
)
|
||||
if x_block_self_attn:
|
||||
assert not pre_only
|
||||
assert not scale_mod_only
|
||||
self.x_block_self_attn = True
|
||||
self.attn2 = SelfAttention(
|
||||
dim=hidden_size,
|
||||
num_heads=num_heads,
|
||||
qkv_bias=qkv_bias,
|
||||
attn_mode=attn_mode,
|
||||
pre_only=False,
|
||||
qk_norm=qk_norm,
|
||||
rmsnorm=rmsnorm,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
operations=operations
|
||||
)
|
||||
else:
|
||||
self.x_block_self_attn = False
|
||||
if not pre_only:
|
||||
if not rmsnorm:
|
||||
self.norm2 = operations.LayerNorm(
|
||||
@ -466,7 +483,11 @@ class DismantledBlock(nn.Module):
|
||||
multiple_of=256,
|
||||
)
|
||||
self.scale_mod_only = scale_mod_only
|
||||
if not scale_mod_only:
|
||||
if x_block_self_attn:
|
||||
assert not pre_only
|
||||
assert not scale_mod_only
|
||||
n_mods = 9
|
||||
elif not scale_mod_only:
|
||||
n_mods = 6 if not pre_only else 2
|
||||
else:
|
||||
n_mods = 4 if not pre_only else 1
|
||||
@ -527,14 +548,64 @@ class DismantledBlock(nn.Module):
|
||||
)
|
||||
return x
|
||||
|
||||
def pre_attention_x(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
|
||||
assert self.x_block_self_attn
|
||||
(
|
||||
shift_msa,
|
||||
scale_msa,
|
||||
gate_msa,
|
||||
shift_mlp,
|
||||
scale_mlp,
|
||||
gate_mlp,
|
||||
shift_msa2,
|
||||
scale_msa2,
|
||||
gate_msa2,
|
||||
) = self.adaLN_modulation(c).chunk(9, dim=1)
|
||||
x_norm = self.norm1(x)
|
||||
qkv = self.attn.pre_attention(modulate(x_norm, shift_msa, scale_msa))
|
||||
qkv2 = self.attn2.pre_attention(modulate(x_norm, shift_msa2, scale_msa2))
|
||||
return qkv, qkv2, (
|
||||
x,
|
||||
gate_msa,
|
||||
shift_mlp,
|
||||
scale_mlp,
|
||||
gate_mlp,
|
||||
gate_msa2,
|
||||
)
|
||||
|
||||
def post_attention_x(self, attn, attn2, x, gate_msa, shift_mlp, scale_mlp, gate_mlp, gate_msa2):
|
||||
assert not self.pre_only
|
||||
attn1 = self.attn.post_attention(attn)
|
||||
attn2 = self.attn2.post_attention(attn2)
|
||||
out1 = gate_msa.unsqueeze(1) * attn1
|
||||
out2 = gate_msa2.unsqueeze(1) * attn2
|
||||
x = x + out1
|
||||
x = x + out2
|
||||
x = x + gate_mlp.unsqueeze(1) * self.mlp(
|
||||
modulate(self.norm2(x), shift_mlp, scale_mlp)
|
||||
)
|
||||
return x
|
||||
|
||||
def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
|
||||
assert not self.pre_only
|
||||
qkv, intermediates = self.pre_attention(x, c)
|
||||
attn = optimized_attention(
|
||||
qkv,
|
||||
num_heads=self.attn.num_heads,
|
||||
)
|
||||
return self.post_attention(attn, *intermediates)
|
||||
if self.x_block_self_attn:
|
||||
qkv, qkv2, intermediates = self.pre_attention_x(x, c)
|
||||
attn, _ = optimized_attention(
|
||||
qkv[0], qkv[1], qkv[2],
|
||||
num_heads=self.attn.num_heads,
|
||||
)
|
||||
attn2, _ = optimized_attention(
|
||||
qkv2[0], qkv2[1], qkv2[2],
|
||||
num_heads=self.attn2.num_heads,
|
||||
)
|
||||
return self.post_attention_x(attn, attn2, *intermediates)
|
||||
else:
|
||||
qkv, intermediates = self.pre_attention(x, c)
|
||||
attn = optimized_attention(
|
||||
qkv[0], qkv[1], qkv[2],
|
||||
heads=self.attn.num_heads,
|
||||
)
|
||||
return self.post_attention(attn, *intermediates)
|
||||
|
||||
|
||||
def block_mixing(*args, use_checkpoint=True, **kwargs):
|
||||
@ -549,7 +620,10 @@ def block_mixing(*args, use_checkpoint=True, **kwargs):
|
||||
def _block_mixing(context, x, context_block, x_block, c):
|
||||
context_qkv, context_intermediates = context_block.pre_attention(context, c)
|
||||
|
||||
x_qkv, x_intermediates = x_block.pre_attention(x, c)
|
||||
if x_block.x_block_self_attn:
|
||||
x_qkv, x_qkv2, x_intermediates = x_block.pre_attention_x(x, c)
|
||||
else:
|
||||
x_qkv, x_intermediates = x_block.pre_attention(x, c)
|
||||
|
||||
o = []
|
||||
for t in range(3):
|
||||
@ -557,8 +631,8 @@ def _block_mixing(context, x, context_block, x_block, c):
|
||||
qkv = tuple(o)
|
||||
|
||||
attn = optimized_attention(
|
||||
qkv,
|
||||
num_heads=x_block.attn.num_heads,
|
||||
qkv[0], qkv[1], qkv[2],
|
||||
heads=x_block.attn.num_heads,
|
||||
)
|
||||
context_attn, x_attn = (
|
||||
attn[:, : context_qkv[0].shape[1]],
|
||||
@ -570,7 +644,14 @@ def _block_mixing(context, x, context_block, x_block, c):
|
||||
|
||||
else:
|
||||
context = None
|
||||
x = x_block.post_attention(x_attn, *x_intermediates)
|
||||
if x_block.x_block_self_attn:
|
||||
attn2 = optimized_attention(
|
||||
x_qkv2[0], x_qkv2[1], x_qkv2[2],
|
||||
heads=x_block.attn2.num_heads,
|
||||
)
|
||||
x = x_block.post_attention_x(x_attn, attn2, *x_intermediates)
|
||||
else:
|
||||
x = x_block.post_attention(x_attn, *x_intermediates)
|
||||
return context, x
|
||||
|
||||
|
||||
@ -585,8 +666,13 @@ class JointBlock(nn.Module):
|
||||
super().__init__()
|
||||
pre_only = kwargs.pop("pre_only")
|
||||
qk_norm = kwargs.pop("qk_norm", None)
|
||||
x_block_self_attn = kwargs.pop("x_block_self_attn", False)
|
||||
self.context_block = DismantledBlock(*args, pre_only=pre_only, qk_norm=qk_norm, **kwargs)
|
||||
self.x_block = DismantledBlock(*args, pre_only=False, qk_norm=qk_norm, **kwargs)
|
||||
self.x_block = DismantledBlock(*args,
|
||||
pre_only=False,
|
||||
qk_norm=qk_norm,
|
||||
x_block_self_attn=x_block_self_attn,
|
||||
**kwargs)
|
||||
|
||||
def forward(self, *args, **kwargs):
|
||||
return block_mixing(
|
||||
@ -642,7 +728,7 @@ class SelfAttentionContext(nn.Module):
|
||||
def forward(self, x):
|
||||
qkv = self.qkv(x)
|
||||
q, k, v = split_qkv(qkv, self.dim_head)
|
||||
x = optimized_attention((q.reshape(q.shape[0], q.shape[1], -1), k, v), self.heads)
|
||||
x = optimized_attention(q.reshape(q.shape[0], q.shape[1], -1), k, v, heads=self.heads)
|
||||
return self.proj(x)
|
||||
|
||||
class ContextProcessorBlock(nn.Module):
|
||||
@ -701,9 +787,12 @@ class MMDiT(nn.Module):
|
||||
qk_norm: Optional[str] = None,
|
||||
qkv_bias: bool = True,
|
||||
context_processor_layers = None,
|
||||
x_block_self_attn: bool = False,
|
||||
x_block_self_attn_layers: Optional[List[int]] = [],
|
||||
context_size = 4096,
|
||||
num_blocks = None,
|
||||
final_layer = True,
|
||||
skip_blocks = False,
|
||||
dtype = None, #TODO
|
||||
device = None,
|
||||
operations = None,
|
||||
@ -718,6 +807,7 @@ class MMDiT(nn.Module):
|
||||
self.pos_embed_scaling_factor = pos_embed_scaling_factor
|
||||
self.pos_embed_offset = pos_embed_offset
|
||||
self.pos_embed_max_size = pos_embed_max_size
|
||||
self.x_block_self_attn_layers = x_block_self_attn_layers
|
||||
|
||||
# hidden_size = default(hidden_size, 64 * depth)
|
||||
# num_heads = default(num_heads, hidden_size // 64)
|
||||
@ -775,26 +865,28 @@ class MMDiT(nn.Module):
|
||||
self.pos_embed = None
|
||||
|
||||
self.use_checkpoint = use_checkpoint
|
||||
self.joint_blocks = nn.ModuleList(
|
||||
[
|
||||
JointBlock(
|
||||
self.hidden_size,
|
||||
num_heads,
|
||||
mlp_ratio=mlp_ratio,
|
||||
qkv_bias=qkv_bias,
|
||||
attn_mode=attn_mode,
|
||||
pre_only=(i == num_blocks - 1) and final_layer,
|
||||
rmsnorm=rmsnorm,
|
||||
scale_mod_only=scale_mod_only,
|
||||
swiglu=swiglu,
|
||||
qk_norm=qk_norm,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
operations=operations
|
||||
)
|
||||
for i in range(num_blocks)
|
||||
]
|
||||
)
|
||||
if not skip_blocks:
|
||||
self.joint_blocks = nn.ModuleList(
|
||||
[
|
||||
JointBlock(
|
||||
self.hidden_size,
|
||||
num_heads,
|
||||
mlp_ratio=mlp_ratio,
|
||||
qkv_bias=qkv_bias,
|
||||
attn_mode=attn_mode,
|
||||
pre_only=(i == num_blocks - 1) and final_layer,
|
||||
rmsnorm=rmsnorm,
|
||||
scale_mod_only=scale_mod_only,
|
||||
swiglu=swiglu,
|
||||
qk_norm=qk_norm,
|
||||
x_block_self_attn=(i in self.x_block_self_attn_layers) or x_block_self_attn,
|
||||
dtype=dtype,
|
||||
device=device,
|
||||
operations=operations,
|
||||
)
|
||||
for i in range(num_blocks)
|
||||
]
|
||||
)
|
||||
|
||||
if final_layer:
|
||||
self.final_layer = FinalLayer(self.hidden_size, patch_size, self.out_channels, dtype=dtype, device=device, operations=operations)
|
||||
@ -857,7 +949,9 @@ class MMDiT(nn.Module):
|
||||
c_mod: torch.Tensor,
|
||||
context: Optional[torch.Tensor] = None,
|
||||
control = None,
|
||||
transformer_options = {},
|
||||
) -> torch.Tensor:
|
||||
patches_replace = transformer_options.get("patches_replace", {})
|
||||
if self.register_length > 0:
|
||||
context = torch.cat(
|
||||
(
|
||||
@ -869,14 +963,25 @@ class MMDiT(nn.Module):
|
||||
|
||||
# context is B, L', D
|
||||
# x is B, L, D
|
||||
blocks_replace = patches_replace.get("dit", {})
|
||||
blocks = len(self.joint_blocks)
|
||||
for i in range(blocks):
|
||||
context, x = self.joint_blocks[i](
|
||||
context,
|
||||
x,
|
||||
c=c_mod,
|
||||
use_checkpoint=self.use_checkpoint,
|
||||
)
|
||||
if ("double_block", i) in blocks_replace:
|
||||
def block_wrap(args):
|
||||
out = {}
|
||||
out["txt"], out["img"] = self.joint_blocks[i](args["txt"], args["img"], c=args["vec"])
|
||||
return out
|
||||
|
||||
out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": c_mod}, {"original_block": block_wrap})
|
||||
context = out["txt"]
|
||||
x = out["img"]
|
||||
else:
|
||||
context, x = self.joint_blocks[i](
|
||||
context,
|
||||
x,
|
||||
c=c_mod,
|
||||
use_checkpoint=self.use_checkpoint,
|
||||
)
|
||||
if control is not None:
|
||||
control_o = control.get("output")
|
||||
if i < len(control_o):
|
||||
@ -894,6 +999,7 @@ class MMDiT(nn.Module):
|
||||
y: Optional[torch.Tensor] = None,
|
||||
context: Optional[torch.Tensor] = None,
|
||||
control = None,
|
||||
transformer_options = {},
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
Forward pass of DiT.
|
||||
@ -915,7 +1021,7 @@ class MMDiT(nn.Module):
|
||||
if context is not None:
|
||||
context = self.context_embedder(context)
|
||||
|
||||
x = self.forward_core_with_concat(x, c, context, control)
|
||||
x = self.forward_core_with_concat(x, c, context, control, transformer_options)
|
||||
|
||||
x = self.unpatchify(x, hw=hw) # (N, out_channels, H, W)
|
||||
return x[:,:,:hw[-2],:hw[-1]]
|
||||
@ -929,7 +1035,8 @@ class OpenAISignatureMMDITWrapper(MMDiT):
|
||||
context: Optional[torch.Tensor] = None,
|
||||
y: Optional[torch.Tensor] = None,
|
||||
control = None,
|
||||
transformer_options = {},
|
||||
**kwargs,
|
||||
) -> torch.Tensor:
|
||||
return super().forward(x, timesteps, context=context, y=y, control=control)
|
||||
return super().forward(x, timesteps, context=context, y=y, control=control, transformer_options=transformer_options)
|
||||
|
||||
|
@ -317,6 +317,10 @@ def model_lora_keys_unet(model, key_map={}):
|
||||
key_lora = "lora_transformer_{}".format(k[:-len(".weight")].replace(".", "_")) #OneTrainer lora
|
||||
key_map[key_lora] = to
|
||||
|
||||
key_lora = "lycoris_{}".format(k[:-len(".weight")].replace(".", "_")) #simpletuner lycoris format
|
||||
key_map[key_lora] = to
|
||||
|
||||
|
||||
if isinstance(model, comfy.model_base.AuraFlow): #Diffusers lora AuraFlow
|
||||
diffusers_keys = comfy.utils.auraflow_to_diffusers(model.model_config.unet_config, output_prefix="diffusion_model.")
|
||||
for k in diffusers_keys:
|
||||
@ -415,7 +419,7 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32):
|
||||
weight *= strength_model
|
||||
|
||||
if isinstance(v, list):
|
||||
v = (calculate_weight(v[1:], comfy.model_management.cast_to_device(v[0], weight.device, intermediate_dtype, copy=True), key, intermediate_dtype=intermediate_dtype), )
|
||||
v = (calculate_weight(v[1:], v[0][1](comfy.model_management.cast_to_device(v[0][0], weight.device, intermediate_dtype, copy=True), inplace=True), key, intermediate_dtype=intermediate_dtype), )
|
||||
|
||||
if len(v) == 1:
|
||||
patch_type = "diff"
|
||||
|
@ -24,6 +24,7 @@ from comfy.ldm.cascade.stage_b import StageB
|
||||
from comfy.ldm.modules.encoders.noise_aug_modules import CLIPEmbeddingNoiseAugmentation
|
||||
from comfy.ldm.modules.diffusionmodules.upscaling import ImageConcatWithNoiseAugmentation
|
||||
from comfy.ldm.modules.diffusionmodules.mmdit import OpenAISignatureMMDITWrapper
|
||||
import comfy.ldm.genmo.joint_model.asymm_models_joint
|
||||
import comfy.ldm.aura.mmdit
|
||||
import comfy.ldm.hydit.models
|
||||
import comfy.ldm.audio.dit
|
||||
@ -96,7 +97,8 @@ class BaseModel(torch.nn.Module):
|
||||
|
||||
if not unet_config.get("disable_unet_model_creation", False):
|
||||
if model_config.custom_operations is None:
|
||||
operations = comfy.ops.pick_operations(unet_config.get("dtype", None), self.manual_cast_dtype, fp8_optimizations=model_config.optimizations.get("fp8", False))
|
||||
fp8 = model_config.optimizations.get("fp8", model_config.scaled_fp8 is not None)
|
||||
operations = comfy.ops.pick_operations(unet_config.get("dtype", None), self.manual_cast_dtype, fp8_optimizations=fp8, scaled_fp8=model_config.scaled_fp8)
|
||||
else:
|
||||
operations = model_config.custom_operations
|
||||
self.diffusion_model = unet_model(**unet_config, device=device, operations=operations)
|
||||
@ -244,6 +246,10 @@ class BaseModel(torch.nn.Module):
|
||||
extra_sds.append(self.model_config.process_clip_vision_state_dict_for_saving(clip_vision_state_dict))
|
||||
|
||||
unet_state_dict = self.diffusion_model.state_dict()
|
||||
|
||||
if self.model_config.scaled_fp8 is not None:
|
||||
unet_state_dict["scaled_fp8"] = torch.tensor([], dtype=self.model_config.scaled_fp8)
|
||||
|
||||
unet_state_dict = self.model_config.process_unet_state_dict_for_saving(unet_state_dict)
|
||||
|
||||
if self.model_type == ModelType.V_PREDICTION:
|
||||
@ -713,3 +719,18 @@ class Flux(BaseModel):
|
||||
out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
|
||||
out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([kwargs.get("guidance", 3.5)]))
|
||||
return out
|
||||
|
||||
class GenmoMochi(BaseModel):
    def __init__(self, model_config, model_type=ModelType.FLOW, device=None):
        super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.genmo.joint_model.asymm_models_joint.AsymmDiTJoint)

    def extra_conds(self, **kwargs):
        out = super().extra_conds(**kwargs)
        attention_mask = kwargs.get("attention_mask", None)
        if attention_mask is not None:
            out['attention_mask'] = comfy.conds.CONDRegular(attention_mask)
            out['num_tokens'] = comfy.conds.CONDConstant(max(1, torch.sum(attention_mask).item()))
        cross_attn = kwargs.get("cross_attn", None)
        if cross_attn is not None:
            out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
        return out
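A toy illustration of the conditioning this method adds, using a fabricated attention mask (the CONDRegular/CONDConstant wrappers are only sketched in the comments):

# attention_mask = torch.tensor([[1, 1, 1, 1, 0, 0]])  # one prompt, 4 real tokens, padded to 6
# torch.sum(attention_mask).item() == 4, so extra_conds() adds:
#   out['attention_mask']  -> the mask wrapped in comfy.conds.CONDRegular
#   out['num_tokens']      -> comfy.conds.CONDConstant(4)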
|
||||
|
@ -70,6 +70,11 @@ def detect_unet_config(state_dict, key_prefix):
|
||||
context_processor = '{}context_processor.layers.0.attn.qkv.weight'.format(key_prefix)
|
||||
if context_processor in state_dict_keys:
|
||||
unet_config["context_processor_layers"] = count_blocks(state_dict_keys, '{}context_processor.layers.'.format(key_prefix) + '{}.')
|
||||
unet_config["x_block_self_attn_layers"] = []
|
||||
for key in state_dict_keys:
|
||||
if key.startswith('{}joint_blocks.'.format(key_prefix)) and key.endswith('.x_block.attn2.qkv.weight'):
|
||||
layer = key[len('{}joint_blocks.'.format(key_prefix)):-len('.x_block.attn2.qkv.weight')]
|
||||
unet_config["x_block_self_attn_layers"].append(int(layer))
|
||||
return unet_config
|
||||
|
||||
if '{}clf.1.weight'.format(key_prefix) in state_dict_keys: #stable cascade
|
||||
@ -145,6 +150,34 @@ def detect_unet_config(state_dict, key_prefix):
|
||||
dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys
|
||||
return dit_config
|
||||
|
||||
if '{}t5_yproj.weight'.format(key_prefix) in state_dict_keys: #Genmo mochi preview
|
||||
dit_config = {}
|
||||
dit_config["image_model"] = "mochi_preview"
|
||||
dit_config["depth"] = 48
|
||||
dit_config["patch_size"] = 2
|
||||
dit_config["num_heads"] = 24
|
||||
dit_config["hidden_size_x"] = 3072
|
||||
dit_config["hidden_size_y"] = 1536
|
||||
dit_config["mlp_ratio_x"] = 4.0
|
||||
dit_config["mlp_ratio_y"] = 4.0
|
||||
dit_config["learn_sigma"] = False
|
||||
dit_config["in_channels"] = 12
|
||||
dit_config["qk_norm"] = True
|
||||
dit_config["qkv_bias"] = False
|
||||
dit_config["out_bias"] = True
|
||||
dit_config["attn_drop"] = 0.0
|
||||
dit_config["patch_embed_bias"] = True
|
||||
dit_config["posenc_preserve_area"] = True
|
||||
dit_config["timestep_mlp_bias"] = True
|
||||
dit_config["attend_to_padding"] = False
|
||||
dit_config["timestep_scale"] = 1000.0
|
||||
dit_config["use_t5"] = True
|
||||
dit_config["t5_feat_dim"] = 4096
|
||||
dit_config["t5_token_length"] = 256
|
||||
dit_config["rope_theta"] = 10000.0
|
||||
return dit_config
|
||||
|
||||
|
||||
if '{}input_blocks.0.0.weight'.format(key_prefix) not in state_dict_keys:
|
||||
return None
|
||||
|
||||
@ -286,9 +319,15 @@ def model_config_from_unet(state_dict, unet_key_prefix, use_base_if_no_match=Fal
|
||||
return None
|
||||
model_config = model_config_from_unet_config(unet_config, state_dict)
|
||||
if model_config is None and use_base_if_no_match:
|
||||
return comfy.supported_models_base.BASE(unet_config)
|
||||
else:
|
||||
return model_config
|
||||
model_config = comfy.supported_models_base.BASE(unet_config)
|
||||
|
||||
scaled_fp8_weight = state_dict.get("{}scaled_fp8".format(unet_key_prefix), None)
|
||||
if scaled_fp8_weight is not None:
|
||||
model_config.scaled_fp8 = scaled_fp8_weight.dtype
|
||||
if model_config.scaled_fp8 == torch.float32:
|
||||
model_config.scaled_fp8 = torch.float8_e4m3fn
|
||||
|
||||
return model_config
|
||||
|
||||
def unet_prefix_from_state_dict(state_dict):
|
||||
candidates = ["model.diffusion_model.", #ldm/sgm models
|
||||
|
@ -647,6 +647,9 @@ def unet_dtype(device=None, model_params=0, supported_dtypes=[torch.float16, tor
|
||||
pass
|
||||
|
||||
if fp8_dtype is not None:
|
||||
if supports_fp8_compute(device): #if fp8 compute is supported the casting is most likely not expensive
|
||||
return fp8_dtype
|
||||
|
||||
free_model_memory = maximum_vram_for_weights(device)
|
||||
if model_params * 2 > free_model_memory:
|
||||
return fp8_dtype
|
||||
@ -840,27 +843,21 @@ def force_channels_last():
|
||||
#TODO
|
||||
return False
|
||||
|
||||
def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False):
|
||||
if device is None or weight.device == device:
|
||||
if not copy:
|
||||
if dtype is None or weight.dtype == dtype:
|
||||
return weight
|
||||
return weight.to(dtype=dtype, copy=copy)
|
||||
|
||||
r = torch.empty_like(weight, dtype=dtype, device=device)
|
||||
r.copy_(weight, non_blocking=non_blocking)
|
||||
return r
|
||||
|
||||
def cast_to_device(tensor, device, dtype, copy=False):
|
||||
device_supports_cast = False
|
||||
if tensor.dtype == torch.float32 or tensor.dtype == torch.float16:
|
||||
device_supports_cast = True
|
||||
elif tensor.dtype == torch.bfloat16:
|
||||
if hasattr(device, 'type') and device.type.startswith("cuda"):
|
||||
device_supports_cast = True
|
||||
elif is_intel_xpu():
|
||||
device_supports_cast = True
|
||||
non_blocking = device_supports_non_blocking(device)
|
||||
return cast_to(tensor, dtype=dtype, device=device, non_blocking=non_blocking, copy=copy)
|
||||
|
||||
non_blocking = device_should_use_non_blocking(device)
|
||||
|
||||
if device_supports_cast:
|
||||
if copy:
|
||||
if tensor.device == device:
|
||||
return tensor.to(dtype, copy=copy, non_blocking=non_blocking)
|
||||
return tensor.to(device, copy=copy, non_blocking=non_blocking).to(dtype, non_blocking=non_blocking)
|
||||
else:
|
||||
return tensor.to(device, non_blocking=non_blocking).to(dtype, non_blocking=non_blocking)
|
||||
else:
|
||||
return tensor.to(device, dtype, copy=copy, non_blocking=non_blocking)
|
||||
|
||||
def xformers_enabled():
|
||||
global directml_enabled
|
||||
|
@ -94,6 +94,31 @@ class LowVramPatch:
|
||||
return comfy.float.stochastic_rounding(comfy.lora.calculate_weight(self.patches[self.key], weight.to(intermediate_dtype), self.key, intermediate_dtype=intermediate_dtype), weight.dtype, seed=string_to_seed(self.key))
|
||||
|
||||
return comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=intermediate_dtype)
|
||||
|
||||
def get_key_weight(model, key):
|
||||
set_func = None
|
||||
convert_func = None
|
||||
op_keys = key.rsplit('.', 1)
|
||||
if len(op_keys) < 2:
|
||||
weight = comfy.utils.get_attr(model, key)
|
||||
else:
|
||||
op = comfy.utils.get_attr(model, op_keys[0])
|
||||
try:
|
||||
set_func = getattr(op, "set_{}".format(op_keys[1]))
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
try:
|
||||
convert_func = getattr(op, "convert_{}".format(op_keys[1]))
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
weight = getattr(op, op_keys[1])
|
||||
if convert_func is not None:
|
||||
weight = comfy.utils.get_attr(model, key)
|
||||
|
||||
return weight, set_func, convert_func
|
||||
|
||||
class ModelPatcher:
|
||||
def __init__(self, model, load_device, offload_device, size=0, weight_inplace_update=False):
|
||||
self.size = size
|
||||
@ -294,14 +319,16 @@ class ModelPatcher:
|
||||
if not k.startswith(filter_prefix):
|
||||
continue
|
||||
bk = self.backup.get(k, None)
|
||||
weight, set_func, convert_func = get_key_weight(self.model, k)
|
||||
if bk is not None:
|
||||
weight = bk.weight
|
||||
else:
|
||||
weight = model_sd[k]
|
||||
if convert_func is None:
|
||||
convert_func = lambda a, **kwargs: a
|
||||
|
||||
if k in self.patches:
|
||||
p[k] = [weight] + self.patches[k]
|
||||
p[k] = [(weight, convert_func)] + self.patches[k]
|
||||
else:
|
||||
p[k] = (weight,)
|
||||
p[k] = [(weight, convert_func)]
|
||||
return p
|
||||
|
||||
def model_state_dict(self, filter_prefix=None):
|
||||
@ -317,8 +344,7 @@ class ModelPatcher:
|
||||
if key not in self.patches:
|
||||
return
|
||||
|
||||
weight = comfy.utils.get_attr(self.model, key)
|
||||
|
||||
weight, set_func, convert_func = get_key_weight(self.model, key)
|
||||
inplace_update = self.weight_inplace_update or inplace_update
|
||||
|
||||
if key not in self.backup:
|
||||
@ -328,12 +354,18 @@ class ModelPatcher:
|
||||
temp_weight = comfy.model_management.cast_to_device(weight, device_to, torch.float32, copy=True)
|
||||
else:
|
||||
temp_weight = weight.to(torch.float32, copy=True)
|
||||
if convert_func is not None:
|
||||
temp_weight = convert_func(temp_weight, inplace=True)
|
||||
|
||||
out_weight = comfy.lora.calculate_weight(self.patches[key], temp_weight, key)
|
||||
out_weight = comfy.float.stochastic_rounding(out_weight, weight.dtype, seed=string_to_seed(key))
|
||||
if inplace_update:
|
||||
comfy.utils.copy_to_param(self.model, key, out_weight)
|
||||
if set_func is None:
|
||||
out_weight = comfy.float.stochastic_rounding(out_weight, weight.dtype, seed=string_to_seed(key))
|
||||
if inplace_update:
|
||||
comfy.utils.copy_to_param(self.model, key, out_weight)
|
||||
else:
|
||||
comfy.utils.set_attr_param(self.model, key, out_weight)
|
||||
else:
|
||||
comfy.utils.set_attr_param(self.model, key, out_weight)
|
||||
set_func(out_weight, inplace_update=inplace_update, seed=string_to_seed(key))
|
||||
|
||||
def load(self, device_to=None, lowvram_model_memory=0, force_patch_weights=False, full_load=False):
|
||||
mem_counter = 0
|
||||
|
104
comfy/ops.py
104
comfy/ops.py
@ -19,20 +19,12 @@
|
||||
import torch
|
||||
import comfy.model_management
|
||||
from comfy.cli_args import args
|
||||
import comfy.float
|
||||
|
||||
def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False):
|
||||
if device is None or weight.device == device:
|
||||
if not copy:
|
||||
if dtype is None or weight.dtype == dtype:
|
||||
return weight
|
||||
return weight.to(dtype=dtype, copy=copy)
|
||||
|
||||
r = torch.empty_like(weight, dtype=dtype, device=device)
|
||||
r.copy_(weight, non_blocking=non_blocking)
|
||||
return r
|
||||
cast_to = comfy.model_management.cast_to #TODO: remove once no more references
|
||||
|
||||
def cast_to_input(weight, input, non_blocking=False, copy=True):
|
||||
return cast_to(weight, input.dtype, input.device, non_blocking=non_blocking, copy=copy)
|
||||
return comfy.model_management.cast_to(weight, input.dtype, input.device, non_blocking=non_blocking, copy=copy)
|
||||
|
||||
def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None):
|
||||
if input is not None:
|
||||
@ -47,12 +39,12 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None):
|
||||
non_blocking = comfy.model_management.device_supports_non_blocking(device)
|
||||
if s.bias is not None:
|
||||
has_function = s.bias_function is not None
|
||||
bias = cast_to(s.bias, bias_dtype, device, non_blocking=non_blocking, copy=has_function)
|
||||
bias = comfy.model_management.cast_to(s.bias, bias_dtype, device, non_blocking=non_blocking, copy=has_function)
|
||||
if has_function:
|
||||
bias = s.bias_function(bias)
|
||||
|
||||
has_function = s.weight_function is not None
|
||||
weight = cast_to(s.weight, dtype, device, non_blocking=non_blocking, copy=has_function)
|
||||
weight = comfy.model_management.cast_to(s.weight, dtype, device, non_blocking=non_blocking, copy=has_function)
|
||||
if has_function:
|
||||
weight = s.weight_function(weight)
|
||||
return weight, bias
|
||||
@ -258,19 +250,29 @@ def fp8_linear(self, input):
|
||||
if dtype not in [torch.float8_e4m3fn]:
|
||||
return None
|
||||
|
||||
tensor_2d = False
|
||||
if len(input.shape) == 2:
|
||||
tensor_2d = True
|
||||
input = input.unsqueeze(1)
|
||||
|
||||
|
||||
if len(input.shape) == 3:
|
||||
inn = input.reshape(-1, input.shape[2]).to(dtype)
|
||||
w, bias = cast_bias_weight(self, input, dtype=dtype, bias_dtype=input.dtype)
|
||||
w = w.t()
|
||||
|
||||
scale_weight = self.scale_weight
|
||||
scale_input = self.scale_input
|
||||
if scale_weight is None:
|
||||
scale_weight = torch.ones((1), device=input.device, dtype=torch.float32)
|
||||
if scale_input is None:
|
||||
scale_input = scale_weight
|
||||
scale_weight = torch.ones((), device=input.device, dtype=torch.float32)
|
||||
else:
|
||||
scale_weight = scale_weight.to(input.device)
|
||||
|
||||
if scale_input is None:
|
||||
scale_input = torch.ones((1), device=input.device, dtype=torch.float32)
|
||||
scale_input = torch.ones((), device=input.device, dtype=torch.float32)
|
||||
inn = input.reshape(-1, input.shape[2]).to(dtype)
|
||||
else:
|
||||
scale_input = scale_input.to(input.device)
|
||||
inn = (input * (1.0 / scale_input).to(input.dtype)).reshape(-1, input.shape[2]).to(dtype)
|
||||
|
||||
if bias is not None:
|
||||
o = torch._scaled_mm(inn, w, out_dtype=input.dtype, bias=bias, scale_a=scale_input, scale_b=scale_weight)
|
||||
@ -280,7 +282,11 @@ def fp8_linear(self, input):
|
||||
if isinstance(o, tuple):
|
||||
o = o[0]
|
||||
|
||||
if tensor_2d:
|
||||
return o.reshape(input.shape[0], -1)
|
||||
|
||||
return o.reshape((-1, input.shape[1], self.weight.shape[0]))
|
||||
|
||||
return None
|
||||
|
||||
class fp8_ops(manual_cast):
|
||||
@ -298,15 +304,63 @@ class fp8_ops(manual_cast):
|
||||
weight, bias = cast_bias_weight(self, input)
|
||||
return torch.nn.functional.linear(input, weight, bias)
|
||||
|
||||
def scaled_fp8_ops(fp8_matrix_mult=False, scale_input=False, override_dtype=None):
|
||||
class scaled_fp8_op(manual_cast):
|
||||
class Linear(manual_cast.Linear):
|
||||
def __init__(self, *args, **kwargs):
|
||||
if override_dtype is not None:
|
||||
kwargs['dtype'] = override_dtype
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def pick_operations(weight_dtype, compute_dtype, load_device=None, disable_fast_fp8=False, fp8_optimizations=False):
|
||||
if comfy.model_management.supports_fp8_compute(load_device):
|
||||
if (fp8_optimizations or args.fast) and not disable_fast_fp8:
|
||||
return fp8_ops
|
||||
def reset_parameters(self):
|
||||
if not hasattr(self, 'scale_weight'):
|
||||
self.scale_weight = torch.nn.parameter.Parameter(data=torch.ones((), device=self.weight.device, dtype=torch.float32), requires_grad=False)
|
||||
|
||||
if not scale_input:
|
||||
self.scale_input = None
|
||||
|
||||
if not hasattr(self, 'scale_input'):
|
||||
self.scale_input = torch.nn.parameter.Parameter(data=torch.ones((), device=self.weight.device, dtype=torch.float32), requires_grad=False)
|
||||
return None
|
||||
|
||||
def forward_comfy_cast_weights(self, input):
|
||||
if fp8_matrix_mult:
|
||||
out = fp8_linear(self, input)
|
||||
if out is not None:
|
||||
return out
|
||||
|
||||
weight, bias = cast_bias_weight(self, input)
|
||||
|
||||
if weight.numel() < input.numel(): #TODO: optimize
|
||||
return torch.nn.functional.linear(input, weight * self.scale_weight.to(device=weight.device, dtype=weight.dtype), bias)
|
||||
else:
|
||||
return torch.nn.functional.linear(input * self.scale_weight.to(device=weight.device, dtype=weight.dtype), weight, bias)
|
||||
|
||||
def convert_weight(self, weight, inplace=False, **kwargs):
|
||||
if inplace:
|
||||
weight *= self.scale_weight.to(device=weight.device, dtype=weight.dtype)
|
||||
return weight
|
||||
else:
|
||||
return weight * self.scale_weight.to(device=weight.device, dtype=weight.dtype)
|
||||
|
||||
def set_weight(self, weight, inplace_update=False, seed=None, **kwargs):
|
||||
weight = comfy.float.stochastic_rounding(weight / self.scale_weight.to(device=weight.device, dtype=weight.dtype), self.weight.dtype, seed=seed)
|
||||
if inplace_update:
|
||||
self.weight.data.copy_(weight)
|
||||
else:
|
||||
self.weight = torch.nn.Parameter(weight, requires_grad=False)
|
||||
|
||||
return scaled_fp8_op
|
||||
|
||||
def pick_operations(weight_dtype, compute_dtype, load_device=None, disable_fast_fp8=False, fp8_optimizations=False, scaled_fp8=None):
|
||||
fp8_compute = comfy.model_management.supports_fp8_compute(load_device)
|
||||
if scaled_fp8 is not None:
|
||||
return scaled_fp8_ops(fp8_matrix_mult=fp8_compute, scale_input=True, override_dtype=scaled_fp8)
|
||||
|
||||
if fp8_compute and (fp8_optimizations or args.fast) and not disable_fast_fp8:
|
||||
return fp8_ops
|
||||
|
||||
if compute_dtype is None or weight_dtype == compute_dtype:
|
||||
return disable_weight_init
|
||||
if args.fast and not disable_fast_fp8:
|
||||
if comfy.model_management.supports_fp8_compute(load_device):
|
||||
return fp8_ops
|
||||
|
||||
return manual_cast
|
||||
|
@ -358,11 +358,35 @@ def beta_scheduler(model_sampling, steps, alpha=0.6, beta=0.6):
|
||||
    ts = numpy.rint(scipy.stats.beta.ppf(ts, alpha, beta) * total_timesteps)

    sigs = []
    last_t = -1
    for t in ts:
        sigs += [float(model_sampling.sigmas[int(t)])]
        if t != last_t:
            sigs += [float(model_sampling.sigmas[int(t)])]
            last_t = t
    sigs += [0.0]
    return torch.FloatTensor(sigs)

# from: https://github.com/genmoai/models/blob/main/src/mochi_preview/infer.py#L41
def linear_quadratic_schedule(model_sampling, steps, threshold_noise=0.025, linear_steps=None):
    if steps == 1:
        sigma_schedule = [1.0, 0.0]
    else:
        if linear_steps is None:
            linear_steps = steps // 2
        linear_sigma_schedule = [i * threshold_noise / linear_steps for i in range(linear_steps)]
        threshold_noise_step_diff = linear_steps - threshold_noise * steps
        quadratic_steps = steps - linear_steps
        quadratic_coef = threshold_noise_step_diff / (linear_steps * quadratic_steps ** 2)
        linear_coef = threshold_noise / linear_steps - 2 * threshold_noise_step_diff / (quadratic_steps ** 2)
        const = quadratic_coef * (linear_steps ** 2)
        quadratic_sigma_schedule = [
            quadratic_coef * (i ** 2) + linear_coef * i + const
            for i in range(linear_steps, steps)
        ]
        sigma_schedule = linear_sigma_schedule + quadratic_sigma_schedule + [1.0]
        sigma_schedule = [1.0 - x for x in sigma_schedule]
    return torch.FloatTensor(sigma_schedule) * model_sampling.sigma_max.cpu()
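A hand-checked sketch of the new schedule for a tiny case. The helper below is a pure-Python mirror of the math above (hypothetical name; the final multiply by model_sampling.sigma_max is left out):

def linear_quadratic_sigmas(steps, threshold_noise=0.025, linear_steps=None):
    if steps == 1:
        return [1.0, 0.0]
    if linear_steps is None:
        linear_steps = steps // 2
    linear_part = [i * threshold_noise / linear_steps for i in range(linear_steps)]
    diff = linear_steps - threshold_noise * steps
    quadratic_steps = steps - linear_steps
    a = diff / (linear_steps * quadratic_steps ** 2)
    b = threshold_noise / linear_steps - 2 * diff / (quadratic_steps ** 2)
    c = a * (linear_steps ** 2)
    quadratic_part = [a * i ** 2 + b * i + c for i in range(linear_steps, steps)]
    return [1.0 - x for x in linear_part + quadratic_part + [1.0]]

print(linear_quadratic_sigmas(4))  # approximately [1.0, 0.9875, 0.975, 0.725, 0.0]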
|
||||
|
||||
def get_mask_aabb(masks):
|
||||
if masks.numel() == 0:
|
||||
return torch.zeros((0, 4), device=masks.device, dtype=torch.int)
|
||||
@ -729,7 +753,7 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model
|
||||
return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
|
||||
|
||||
|
||||
SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "beta"]
|
||||
SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "beta", "linear_quadratic"]
|
||||
SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"]
|
||||
|
||||
def calculate_sigmas(model_sampling, scheduler_name, steps):
|
||||
@ -747,6 +771,8 @@ def calculate_sigmas(model_sampling, scheduler_name, steps):
|
||||
sigmas = normal_scheduler(model_sampling, steps, sgm=True)
|
||||
elif scheduler_name == "beta":
|
||||
sigmas = beta_scheduler(model_sampling, steps)
|
||||
elif scheduler_name == "linear_quadratic":
|
||||
sigmas = linear_quadratic_schedule(model_sampling, steps)
|
||||
else:
|
||||
logging.error("error invalid scheduler {}".format(scheduler_name))
|
||||
return sigmas
|
||||
|
64
comfy/sd.py
64
comfy/sd.py
@ -7,6 +7,7 @@ from .ldm.models.autoencoder import AutoencoderKL, AutoencodingEngine
|
||||
from .ldm.cascade.stage_a import StageA
|
||||
from .ldm.cascade.stage_c_coder import StageC_coder
|
||||
from .ldm.audio.autoencoder import AudioOobleckVAE
|
||||
import comfy.ldm.genmo.vae.model
|
||||
import yaml
|
||||
|
||||
import comfy.utils
|
||||
@ -25,6 +26,7 @@ import comfy.text_encoders.aura_t5
|
||||
import comfy.text_encoders.hydit
|
||||
import comfy.text_encoders.flux
|
||||
import comfy.text_encoders.long_clipl
|
||||
import comfy.text_encoders.genmo
|
||||
|
||||
import comfy.model_patcher
|
||||
import comfy.lora
|
||||
@ -241,6 +243,13 @@ class VAE:
|
||||
self.process_output = lambda audio: audio
|
||||
self.process_input = lambda audio: audio
|
||||
self.working_dtypes = [torch.float16, torch.bfloat16, torch.float32]
|
||||
elif "blocks.2.blocks.3.stack.5.weight" in sd or "decoder.blocks.2.blocks.3.stack.5.weight" in sd: #genmo mochi vae
|
||||
if "blocks.2.blocks.3.stack.5.weight" in sd:
|
||||
sd = comfy.utils.state_dict_prefix_replace(sd, {"": "decoder."})
|
||||
self.first_stage_model = comfy.ldm.genmo.vae.model.VideoVAE()
|
||||
self.latent_channels = 12
|
||||
self.memory_used_decode = lambda shape, dtype: (1000 * shape[2] * shape[3] * shape[4] * (6 * 8 * 8)) * model_management.dtype_size(dtype)
|
||||
self.upscale_ratio = (lambda a: max(0, a * 6 - 5), 8, 8)
|
||||
else:
|
||||
logging.warning("WARNING: No VAE weights detected, VAE not initalized.")
|
||||
self.first_stage_model = None
|
||||
@ -296,6 +305,10 @@ class VAE:
|
||||
decode_fn = lambda a: self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)).float()
|
||||
return comfy.utils.tiled_scale_multidim(samples, decode_fn, tile=(tile_x,), overlap=overlap, upscale_amount=self.upscale_ratio, out_channels=self.output_channels, output_device=self.output_device)
|
||||
|
||||
def decode_tiled_3d(self, samples, tile_t=999, tile_x=32, tile_y=32, overlap=(1, 8, 8)):
|
||||
decode_fn = lambda a: self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)).float()
|
||||
return self.process_output(comfy.utils.tiled_scale_multidim(samples, decode_fn, tile=(tile_t, tile_x, tile_y), overlap=overlap, upscale_amount=self.upscale_ratio, out_channels=self.output_channels, output_device=self.output_device))
|
||||
|
||||
def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
|
||||
steps = pixel_samples.shape[0] * comfy.utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x, tile_y, overlap)
|
||||
steps += pixel_samples.shape[0] * comfy.utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x // 2, tile_y * 2, overlap)
|
||||
@ -314,6 +327,7 @@ class VAE:
|
||||
return comfy.utils.tiled_scale_multidim(samples, encode_fn, tile=(tile_x,), overlap=overlap, upscale_amount=(1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device)
|
||||
|
||||
def decode(self, samples_in):
|
||||
pixel_samples = None
|
||||
try:
|
||||
memory_used = self.memory_used_decode(samples_in.shape, self.vae_dtype)
|
||||
model_management.load_models_gpu([self.patcher], memory_required=memory_used)
|
||||
@ -321,16 +335,21 @@ class VAE:
|
||||
batch_number = int(free_memory / memory_used)
|
||||
batch_number = max(1, batch_number)
|
||||
|
||||
pixel_samples = torch.empty((samples_in.shape[0], self.output_channels) + tuple(map(lambda a: a * self.upscale_ratio, samples_in.shape[2:])), device=self.output_device)
|
||||
for x in range(0, samples_in.shape[0], batch_number):
|
||||
samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device)
|
||||
pixel_samples[x:x+batch_number] = self.process_output(self.first_stage_model.decode(samples).to(self.output_device).float())
|
||||
out = self.process_output(self.first_stage_model.decode(samples).to(self.output_device).float())
|
||||
if pixel_samples is None:
|
||||
pixel_samples = torch.empty((samples_in.shape[0],) + tuple(out.shape[1:]), device=self.output_device)
|
||||
pixel_samples[x:x+batch_number] = out
|
||||
except model_management.OOM_EXCEPTION as e:
|
||||
logging.warning("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
|
||||
if len(samples_in.shape) == 3:
|
||||
dims = samples_in.ndim - 2
|
||||
if dims == 1:
|
||||
pixel_samples = self.decode_tiled_1d(samples_in)
|
||||
else:
|
||||
elif dims == 2:
|
||||
pixel_samples = self.decode_tiled_(samples_in)
|
||||
elif dims == 3:
|
||||
pixel_samples = self.decode_tiled_3d(samples_in)
|
||||
|
||||
pixel_samples = pixel_samples.to(self.output_device).movedim(1,-1)
|
||||
return pixel_samples
|
||||
@ -398,6 +417,7 @@ class CLIPType(Enum):
|
||||
STABLE_AUDIO = 4
|
||||
HUNYUAN_DIT = 5
|
||||
FLUX = 6
|
||||
MOCHI = 7
|
||||
|
||||
def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}):
|
||||
clip_data = []
|
||||
@ -432,16 +452,14 @@ def detect_te_model(sd):
|
||||
return None
|
||||
|
||||
|
||||
def t5xxl_weight_dtype(clip_data):
|
||||
def t5xxl_detect(clip_data):
|
||||
weight_name = "encoder.block.23.layer.1.DenseReluDense.wi_1.weight"
|
||||
|
||||
dtype_t5 = None
|
||||
for sd in clip_data:
|
||||
weight = sd.get(weight_name, None)
|
||||
if weight is not None:
|
||||
dtype_t5 = weight.dtype
|
||||
break
|
||||
return dtype_t5
|
||||
if weight_name in sd:
|
||||
return comfy.text_encoders.sd3_clip.t5_xxl_detect(sd)
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}):
|
||||
@ -475,8 +493,12 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
|
||||
clip_target.clip = comfy.text_encoders.sd2_clip.SD2ClipModel
|
||||
clip_target.tokenizer = comfy.text_encoders.sd2_clip.SD2Tokenizer
|
||||
elif te_model == TEModel.T5_XXL:
|
||||
clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(clip_l=False, clip_g=False, t5=True, dtype_t5=t5xxl_weight_dtype(clip_data))
|
||||
clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer
|
||||
if clip_type == CLIPType.SD3:
|
||||
clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(clip_l=False, clip_g=False, t5=True, **t5xxl_detect(clip_data))
|
||||
clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer
|
||||
else: #CLIPType.MOCHI
|
||||
clip_target.clip = comfy.text_encoders.genmo.mochi_te(**t5xxl_detect(clip_data))
|
||||
clip_target.tokenizer = comfy.text_encoders.genmo.MochiT5Tokenizer
|
||||
elif te_model == TEModel.T5_XL:
|
||||
clip_target.clip = comfy.text_encoders.aura_t5.AuraT5Model
|
||||
clip_target.tokenizer = comfy.text_encoders.aura_t5.AuraT5Tokenizer
|
||||
@ -493,19 +515,19 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
|
||||
elif len(clip_data) == 2:
|
||||
if clip_type == CLIPType.SD3:
|
||||
te_models = [detect_te_model(clip_data[0]), detect_te_model(clip_data[1])]
|
||||
clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(clip_l=TEModel.CLIP_L in te_models, clip_g=TEModel.CLIP_G in te_models, t5=TEModel.T5_XXL in te_models, dtype_t5=t5xxl_weight_dtype(clip_data))
|
||||
clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(clip_l=TEModel.CLIP_L in te_models, clip_g=TEModel.CLIP_G in te_models, t5=TEModel.T5_XXL in te_models, **t5xxl_detect(clip_data))
|
||||
clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer
|
||||
elif clip_type == CLIPType.HUNYUAN_DIT:
|
||||
clip_target.clip = comfy.text_encoders.hydit.HyditModel
|
||||
clip_target.tokenizer = comfy.text_encoders.hydit.HyditTokenizer
|
||||
elif clip_type == CLIPType.FLUX:
|
||||
clip_target.clip = comfy.text_encoders.flux.flux_clip(dtype_t5=t5xxl_weight_dtype(clip_data))
|
||||
clip_target.clip = comfy.text_encoders.flux.flux_clip(**t5xxl_detect(clip_data))
|
||||
clip_target.tokenizer = comfy.text_encoders.flux.FluxTokenizer
|
||||
else:
|
||||
clip_target.clip = sdxl_clip.SDXLClipModel
|
||||
clip_target.tokenizer = sdxl_clip.SDXLTokenizer
|
||||
elif len(clip_data) == 3:
|
||||
clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(dtype_t5=t5xxl_weight_dtype(clip_data))
|
||||
clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(**t5xxl_detect(clip_data))
|
||||
clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer
|
||||
|
||||
parameters = 0
|
||||
@ -580,7 +602,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c
|
||||
return None
|
||||
|
||||
unet_weight_dtype = list(model_config.supported_inference_dtypes)
|
||||
if weight_dtype is not None:
|
||||
if weight_dtype is not None and model_config.scaled_fp8 is None:
|
||||
unet_weight_dtype.append(weight_dtype)
|
||||
|
||||
model_config.custom_operations = model_options.get("custom_operations", None)
|
||||
@ -649,6 +671,8 @@ def load_diffusion_model_state_dict(sd, model_options={}): #load unet in diffuse
|
||||
sd = temp_sd
|
||||
|
||||
parameters = comfy.utils.calculate_parameters(sd)
|
||||
weight_dtype = comfy.utils.weight_dtype(sd)
|
||||
|
||||
load_device = model_management.get_torch_device()
|
||||
model_config = model_detection.model_config_from_unet(sd, "")
|
||||
|
||||
@ -675,8 +699,12 @@ def load_diffusion_model_state_dict(sd, model_options={}): #load unet in diffuse
|
||||
logging.warning("{} {}".format(diffusers_keys[k], k))
|
||||
|
||||
offload_device = model_management.unet_offload_device()
|
||||
unet_weight_dtype = list(model_config.supported_inference_dtypes)
|
||||
if weight_dtype is not None and model_config.scaled_fp8 is None:
|
||||
unet_weight_dtype.append(weight_dtype)
|
||||
|
||||
if dtype is None:
|
||||
unet_dtype = model_management.unet_dtype(model_params=parameters, supported_dtypes=model_config.supported_inference_dtypes)
|
||||
unet_dtype = model_management.unet_dtype(model_params=parameters, supported_dtypes=unet_weight_dtype)
|
||||
else:
|
||||
unet_dtype = dtype
|
||||
|
||||
|
@ -94,11 +94,20 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder):
|
||||
config = json.load(f)
|
||||
|
||||
operations = model_options.get("custom_operations", None)
|
||||
scaled_fp8 = None
|
||||
|
||||
if operations is None:
|
||||
operations = comfy.ops.manual_cast
|
||||
scaled_fp8 = model_options.get("scaled_fp8", None)
|
||||
if scaled_fp8 is not None:
|
||||
operations = comfy.ops.scaled_fp8_ops(fp8_matrix_mult=False, override_dtype=scaled_fp8)
|
||||
else:
|
||||
operations = comfy.ops.manual_cast
|
||||
|
||||
self.operations = operations
|
||||
self.transformer = model_class(config, dtype, device, self.operations)
|
||||
if scaled_fp8 is not None:
|
||||
self.transformer.scaled_fp8 = torch.nn.Parameter(torch.tensor([], dtype=scaled_fp8))
|
||||
|
||||
self.num_layers = self.transformer.num_layers
|
||||
|
||||
self.max_length = max_length
|
||||
|
@ -10,6 +10,7 @@ import comfy.text_encoders.sa_t5
|
||||
import comfy.text_encoders.aura_t5
|
||||
import comfy.text_encoders.hydit
|
||||
import comfy.text_encoders.flux
|
||||
import comfy.text_encoders.genmo
|
||||
|
||||
from . import supported_models_base
|
||||
from . import latent_formats
|
||||
@ -529,12 +530,11 @@ class SD3(supported_models_base.BASE):
|
||||
clip_l = True
|
||||
if "{}clip_g.transformer.text_model.final_layer_norm.weight".format(pref) in state_dict:
|
||||
clip_g = True
|
||||
t5_key = "{}t5xxl.transformer.encoder.final_layer_norm.weight".format(pref)
|
||||
if t5_key in state_dict:
|
||||
t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref))
|
||||
if "dtype_t5" in t5_detect:
|
||||
t5 = True
|
||||
dtype_t5 = state_dict[t5_key].dtype
|
||||
|
||||
return supported_models_base.ClipTarget(comfy.text_encoders.sd3_clip.SD3Tokenizer, comfy.text_encoders.sd3_clip.sd3_clip(clip_l=clip_l, clip_g=clip_g, t5=t5, dtype_t5=dtype_t5))
|
||||
return supported_models_base.ClipTarget(comfy.text_encoders.sd3_clip.SD3Tokenizer, comfy.text_encoders.sd3_clip.sd3_clip(clip_l=clip_l, clip_g=clip_g, t5=t5, **t5_detect))
|
||||
|
||||
class StableAudio(supported_models_base.BASE):
|
||||
unet_config = {
|
||||
@ -653,11 +653,8 @@ class Flux(supported_models_base.BASE):
|
||||
|
||||
def clip_target(self, state_dict={}):
|
||||
pref = self.text_encoder_key_prefix[0]
|
||||
t5_key = "{}t5xxl.transformer.encoder.final_layer_norm.weight".format(pref)
|
||||
dtype_t5 = None
|
||||
if t5_key in state_dict:
|
||||
dtype_t5 = state_dict[t5_key].dtype
|
||||
return supported_models_base.ClipTarget(comfy.text_encoders.flux.FluxTokenizer, comfy.text_encoders.flux.flux_clip(dtype_t5=dtype_t5))
|
||||
t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref))
|
||||
return supported_models_base.ClipTarget(comfy.text_encoders.flux.FluxTokenizer, comfy.text_encoders.flux.flux_clip(**t5_detect))
|
||||
|
||||
class FluxSchnell(Flux):
|
||||
unet_config = {
|
||||
@ -674,7 +671,36 @@ class FluxSchnell(Flux):
|
||||
out = model_base.Flux(self, model_type=model_base.ModelType.FLOW, device=device)
|
||||
return out
|
||||
|
||||
class GenmoMochi(supported_models_base.BASE):
|
||||
unet_config = {
|
||||
"image_model": "mochi_preview",
|
||||
}
|
||||
|
||||
models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, HunyuanDiT, HunyuanDiT1, Flux, FluxSchnell]
|
||||
sampling_settings = {
|
||||
"multiplier": 1.0,
|
||||
"shift": 6.0,
|
||||
}
|
||||
|
||||
unet_extra_config = {}
|
||||
latent_format = latent_formats.Mochi
|
||||
|
||||
memory_usage_factor = 2.0 #TODO
|
||||
|
||||
supported_inference_dtypes = [torch.bfloat16, torch.float32]
|
||||
|
||||
vae_key_prefix = ["vae."]
|
||||
text_encoder_key_prefix = ["text_encoders."]
|
||||
|
||||
def get_model(self, state_dict, prefix="", device=None):
|
||||
out = model_base.GenmoMochi(self, device=device)
|
||||
return out
|
||||
|
||||
def clip_target(self, state_dict={}):
|
||||
pref = self.text_encoder_key_prefix[0]
|
||||
t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref))
|
||||
return supported_models_base.ClipTarget(comfy.text_encoders.genmo.MochiT5Tokenizer, comfy.text_encoders.genmo.mochi_te(**t5_detect))
|
||||
|
||||
|
||||
models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, HunyuanDiT, HunyuanDiT1, Flux, FluxSchnell, GenmoMochi]
|
||||
|
||||
models += [SVD_img2vid]
|
||||
|
@ -49,6 +49,7 @@ class BASE:
|
||||
|
||||
manual_cast_dtype = None
|
||||
custom_operations = None
|
||||
scaled_fp8 = None
|
||||
optimizations = {"fp8": False}
|
||||
|
||||
@classmethod
|
||||
|
@ -1,15 +1,11 @@
|
||||
from comfy import sd1_clip
|
||||
import comfy.text_encoders.t5
|
||||
import comfy.text_encoders.sd3_clip
|
||||
import comfy.model_management
|
||||
from transformers import T5TokenizerFast
|
||||
import torch
|
||||
import os
|
||||
|
||||
class T5XXLModel(sd1_clip.SDClipModel):
|
||||
def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, model_options={}):
|
||||
textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_config_xxl.json")
|
||||
super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5, model_options=model_options)
|
||||
|
||||
class T5XXLTokenizer(sd1_clip.SDTokenizer):
|
||||
def __init__(self, embedding_directory=None, tokenizer_data={}):
|
||||
tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer")
|
||||
@ -41,7 +37,7 @@ class FluxClipModel(torch.nn.Module):
|
||||
dtype_t5 = comfy.model_management.pick_weight_dtype(dtype_t5, dtype, device)
|
||||
clip_l_class = model_options.get("clip_l_class", sd1_clip.SDClipModel)
|
||||
self.clip_l = clip_l_class(device=device, dtype=dtype, return_projected_pooled=False, model_options=model_options)
|
||||
self.t5xxl = T5XXLModel(device=device, dtype=dtype_t5, model_options=model_options)
|
||||
self.t5xxl = comfy.text_encoders.sd3_clip.T5XXLModel(device=device, dtype=dtype_t5, model_options=model_options)
|
||||
self.dtypes = set([dtype, dtype_t5])
|
||||
|
||||
def set_clip_options(self, options):
|
||||
@ -66,8 +62,11 @@ class FluxClipModel(torch.nn.Module):
|
||||
else:
|
||||
return self.t5xxl.load_sd(sd)
|
||||
|
||||
def flux_clip(dtype_t5=None):
|
||||
def flux_clip(dtype_t5=None, t5xxl_scaled_fp8=None):
|
||||
class FluxClipModel_(FluxClipModel):
|
||||
def __init__(self, device="cpu", dtype=None, model_options={}):
|
||||
if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options:
|
||||
model_options = model_options.copy()
|
||||
model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8
|
||||
super().__init__(dtype_t5=dtype_t5, device=device, dtype=dtype, model_options=model_options)
|
||||
return FluxClipModel_
|
||||
|
38
comfy/text_encoders/genmo.py
Normal file
38
comfy/text_encoders/genmo.py
Normal file
@ -0,0 +1,38 @@
|
||||
from comfy import sd1_clip
import comfy.text_encoders.sd3_clip
import os
from transformers import T5TokenizerFast


class T5XXLModel(comfy.text_encoders.sd3_clip.T5XXLModel):
    def __init__(self, **kwargs):
        kwargs["attention_mask"] = True
        super().__init__(**kwargs)


class MochiT5XXL(sd1_clip.SD1ClipModel):
    def __init__(self, device="cpu", dtype=None, model_options={}):
        super().__init__(device=device, dtype=dtype, clip_name="t5xxl", clip_model=T5XXLModel, model_options=model_options)


class T5XXLTokenizer(sd1_clip.SDTokenizer):
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer")
        super().__init__(tokenizer_path, embedding_directory=embedding_directory, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256)


class MochiT5Tokenizer(sd1_clip.SD1Tokenizer):
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="t5xxl", tokenizer=T5XXLTokenizer)


def mochi_te(dtype_t5=None, t5xxl_scaled_fp8=None):
    class MochiTEModel_(MochiT5XXL):
        def __init__(self, device="cpu", dtype=None, model_options={}):
            if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options:
                model_options = model_options.copy()
                model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8
            if dtype is None:
                dtype = dtype_t5
            super().__init__(device=device, dtype=dtype, model_options=model_options)
    return MochiTEModel_
|
@ -10,8 +10,26 @@ import logging
class T5XXLModel(sd1_clip.SDClipModel):
    def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None, attention_mask=False, model_options={}):
        textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_config_xxl.json")
        t5xxl_scaled_fp8 = model_options.get("t5xxl_scaled_fp8", None)
        if t5xxl_scaled_fp8 is not None:
            model_options = model_options.copy()
            model_options["scaled_fp8"] = t5xxl_scaled_fp8

        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)


def t5_xxl_detect(state_dict, prefix=""):
    out = {}
    t5_key = "{}encoder.final_layer_norm.weight".format(prefix)
    if t5_key in state_dict:
        out["dtype_t5"] = state_dict[t5_key].dtype

    scaled_fp8_key = "{}scaled_fp8".format(prefix)
    if scaled_fp8_key in state_dict:
        out["t5xxl_scaled_fp8"] = state_dict[scaled_fp8_key].dtype

    return out

class T5XXLTokenizer(sd1_clip.SDTokenizer):
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer")

@ -139,8 +157,11 @@ class SD3ClipModel(torch.nn.Module):
        else:
            return self.t5xxl.load_sd(sd)

def sd3_clip(clip_l=True, clip_g=True, t5=True, dtype_t5=None, t5_attention_mask=False):
def sd3_clip(clip_l=True, clip_g=True, t5=True, dtype_t5=None, t5xxl_scaled_fp8=None, t5_attention_mask=False):
    class SD3ClipModel_(SD3ClipModel):
        def __init__(self, device="cpu", dtype=None, model_options={}):
            if t5xxl_scaled_fp8 is not None and "t5xxl_scaled_fp8" not in model_options:
                model_options = model_options.copy()
                model_options["t5xxl_scaled_fp8"] = t5xxl_scaled_fp8
            super().__init__(clip_l=clip_l, clip_g=clip_g, t5=t5, dtype_t5=dtype_t5, t5_attention_mask=t5_attention_mask, device=device, dtype=dtype, model_options=model_options)
    return SD3ClipModel_
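t5_xxl_detect keys off two tensors in the text encoder state dict: the final layer norm weight (whose dtype becomes dtype_t5) and an optional scaled_fp8 marker tensor. A small sketch with stand-in tensors; the prefix shown is illustrative, not necessarily the one used by the loaders:

import torch
from comfy.text_encoders.sd3_clip import t5_xxl_detect

pref = "text_encoders.t5xxl.transformer."  # illustrative prefix
sd = {
    pref + "encoder.final_layer_norm.weight": torch.zeros(4096, dtype=torch.float8_e4m3fn),
    pref + "scaled_fp8": torch.zeros(1, dtype=torch.float8_e4m3fn),
}
print(t5_xxl_detect(sd, prefix=pref))
# {'dtype_t5': torch.float8_e4m3fn, 't5xxl_scaled_fp8': torch.float8_e4m3fn}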
@ -68,7 +68,7 @@ def weight_dtype(sd, prefix=""):
    for k in sd.keys():
        if k.startswith(prefix):
            w = sd[k]
            dtypes[w.dtype] = dtypes.get(w.dtype, 0) + 1
            dtypes[w.dtype] = dtypes.get(w.dtype, 0) + w.numel()

    if len(dtypes) == 0:
        return None
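The one-line change above makes weight_dtype count parameters instead of tensors, so a few small fp32 biases can no longer outvote the fp16 bulk of a checkpoint. A self-contained illustration with toy tensors:

import torch

sd = {
    "a.weight": torch.zeros(1000, 1000, dtype=torch.float16),  # 1M fp16 parameters
    "b.bias": torch.zeros(10, dtype=torch.float32),
    "c.bias": torch.zeros(10, dtype=torch.float32),
}
by_count, by_numel = {}, {}
for w in sd.values():
    by_count[w.dtype] = by_count.get(w.dtype, 0) + 1
    by_numel[w.dtype] = by_numel.get(w.dtype, 0) + w.numel()
print(max(by_count, key=by_count.get))  # torch.float32 (misleading majority of tensors)
print(max(by_numel, key=by_numel.get))  # torch.float16 (dominant dtype by parameter count)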
@ -690,9 +690,14 @@ def lanczos(samples, width, height):
    return result.to(samples.device, samples.dtype)

def common_upscale(samples, width, height, upscale_method, crop):
    orig_shape = tuple(samples.shape)
    if len(orig_shape) > 4:
        samples = samples.reshape(samples.shape[0], samples.shape[1], -1, samples.shape[-2], samples.shape[-1])
        samples = samples.movedim(2, 1)
        samples = samples.reshape(-1, orig_shape[1], orig_shape[-2], orig_shape[-1])
    if crop == "center":
        old_width = samples.shape[3]
        old_height = samples.shape[2]
        old_width = samples.shape[-1]
        old_height = samples.shape[-2]
        old_aspect = old_width / old_height
        new_aspect = width / height
        x = 0

@ -701,16 +706,22 @@ def common_upscale(samples, width, height, upscale_method, crop):
            x = round((old_width - old_width * (new_aspect / old_aspect)) / 2)
        elif old_aspect < new_aspect:
            y = round((old_height - old_height * (old_aspect / new_aspect)) / 2)
        s = samples[:,:,y:old_height-y,x:old_width-x]
        s = samples.narrow(-2, y, old_height - y * 2).narrow(-1, x, old_width - x * 2)
    else:
        s = samples

    if upscale_method == "bislerp":
        return bislerp(s, width, height)
        out = bislerp(s, width, height)
    elif upscale_method == "lanczos":
        return lanczos(s, width, height)
        out = lanczos(s, width, height)
    else:
        return torch.nn.functional.interpolate(s, size=(height, width), mode=upscale_method)
        out = torch.nn.functional.interpolate(s, size=(height, width), mode=upscale_method)

    if len(orig_shape) == 4:
        return out

    out = out.reshape((orig_shape[0], -1, orig_shape[1]) + (height, width))
    return out.movedim(2, 1).reshape(orig_shape[:-2] + (height, width))
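common_upscale now also accepts latents with more than four dimensions by folding the extra (e.g. temporal) axis into the batch, interpolating in 2D, and unfolding afterwards. A shape-only sketch of that round trip, using an illustrative video latent:

import torch
import comfy.utils

video_latent = torch.zeros(1, 12, 5, 60, 106)  # N, C, T, H, W
out = comfy.utils.common_upscale(video_latent, 212, 120, "nearest-exact", "disabled")
print(out.shape)  # torch.Size([1, 12, 5, 120, 212])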
def get_tiled_scale_steps(width, height, tile_x, tile_y, overlap):
    rows = 1 if height <= tile_y else math.ceil((height - overlap) / (tile_y - overlap))

@ -720,7 +731,27 @@ def get_tiled_scale_steps(width, height, tile_x, tile_y, overlap):
@torch.inference_mode()
def tiled_scale_multidim(samples, function, tile=(64, 64), overlap = 8, upscale_amount = 4, out_channels = 3, output_device="cpu", pbar = None):
    dims = len(tile)
    output = torch.empty([samples.shape[0], out_channels] + list(map(lambda a: round(a * upscale_amount), samples.shape[2:])), device=output_device)

    if not (isinstance(upscale_amount, (tuple, list))):
        upscale_amount = [upscale_amount] * dims

    if not (isinstance(overlap, (tuple, list))):
        overlap = [overlap] * dims

    def get_upscale(dim, val):
        up = upscale_amount[dim]
        if callable(up):
            return up(val)
        else:
            return up * val

    def mult_list_upscale(a):
        out = []
        for i in range(len(a)):
            out.append(round(get_upscale(i, a[i])))
        return out

    output = torch.empty([samples.shape[0], out_channels] + mult_list_upscale(samples.shape[2:]), device=output_device)

    for b in range(samples.shape[0]):
        s = samples[b:b+1]

@ -732,27 +763,27 @@ def tiled_scale_multidim(samples, function, tile=(64, 64), overlap = 8, upscale_
                pbar.update(1)
            continue

        out = torch.zeros([s.shape[0], out_channels] + list(map(lambda a: round(a * upscale_amount), s.shape[2:])), device=output_device)
        out_div = torch.zeros([s.shape[0], out_channels] + list(map(lambda a: round(a * upscale_amount), s.shape[2:])), device=output_device)
        out = torch.zeros([s.shape[0], out_channels] + mult_list_upscale(s.shape[2:]), device=output_device)
        out_div = torch.zeros([s.shape[0], out_channels] + mult_list_upscale(s.shape[2:]), device=output_device)

        positions = [range(0, s.shape[d+2], tile[d] - overlap) if s.shape[d+2] > tile[d] else [0] for d in range(dims)]
        positions = [range(0, s.shape[d+2], tile[d] - overlap[d]) if s.shape[d+2] > tile[d] else [0] for d in range(dims)]

        for it in itertools.product(*positions):
            s_in = s
            upscaled = []

            for d in range(dims):
                pos = max(0, min(s.shape[d + 2] - overlap, it[d]))
                pos = max(0, min(s.shape[d + 2] - (overlap[d] + 1), it[d]))
                l = min(tile[d], s.shape[d + 2] - pos)
                s_in = s_in.narrow(d + 2, pos, l)
                upscaled.append(round(pos * upscale_amount))
                upscaled.append(round(get_upscale(d, pos)))

            ps = function(s_in).to(output_device)
            mask = torch.ones_like(ps)
            feather = round(overlap * upscale_amount)
            for t in range(feather):
                for d in range(2, dims + 2):
            for d in range(2, dims + 2):
                feather = round(get_upscale(d - 2, overlap[d - 2]))
                for t in range(feather):
                    a = (t + 1) / feather
                    mask.narrow(d, t, 1).mul_(a)
                    mask.narrow(d, mask.shape[d] - 1 - t, 1).mul_(a)
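tiled_scale_multidim now accepts per-dimension overlap and upscale_amount values, and an upscale entry may be a callable for axes that do not scale linearly (a video VAE time axis, for instance). A standalone sketch of the helper logic with illustrative factors:

# (frames, height, width): time scales as (t - 1) * 6 + 1, space scales by 8.
upscale_amount = [lambda t: (t - 1) * 6 + 1, 8, 8]
overlap = [1, 8, 8]

def get_upscale(dim, val):
    up = upscale_amount[dim]
    return up(val) if callable(up) else up * val

def mult_list_upscale(a):
    return [round(get_upscale(i, v)) for i, v in enumerate(a)]

print(mult_list_upscale([5, 60, 106]))  # [25, 480, 848]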
@ -1,4 +1,5 @@
import comfy.utils
import comfy_extras.nodes_post_processing
import torch

def reshape_latent_to(target_shape, latent):

@ -145,6 +146,131 @@ class LatentBatchSeedBehavior:

        return (samples_out,)
class LatentApplyOperation:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "samples": ("LATENT",),
                              "operation": ("LATENT_OPERATION",),
                              }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True

    def op(self, samples, operation):
        samples_out = samples.copy()

        s1 = samples["samples"]
        samples_out["samples"] = operation(latent=s1)
        return (samples_out,)
class LatentApplyOperationCFG:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model": ("MODEL",),
                              "operation": ("LATENT_OPERATION",),
                              }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True

    def patch(self, model, operation):
        m = model.clone()

        def pre_cfg_function(args):
            conds_out = args["conds_out"]
            if len(conds_out) == 2:
                conds_out[0] = operation(latent=(conds_out[0] - conds_out[1])) + conds_out[1]
            else:
                conds_out[0] = operation(latent=conds_out[0])
            return conds_out

        m.set_model_sampler_pre_cfg_function(pre_cfg_function)
        return (m, )
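LatentApplyOperationCFG registers the operation as a pre-CFG hook; when the usual cond/uncond pair is present, the operation is applied to the guidance direction (cond minus uncond) rather than to the raw prediction. A minimal sketch with an illustrative operation and random stand-in tensors:

import torch

def operation(latent, **kwargs):   # any LATENT_OPERATION-style callable
    return latent * 0.5            # e.g. halve the guidance direction

cond = torch.randn(1, 4, 8, 8)
uncond = torch.randn(1, 4, 8, 8)
new_cond = operation(latent=(cond - uncond)) + uncond  # only the guidance delta changes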
class LatentOperationTonemapReinhard:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
                              }}

    RETURN_TYPES = ("LATENT_OPERATION",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True

    def op(self, multiplier):
        def tonemap_reinhard(latent, **kwargs):
            latent_vector_magnitude = (torch.linalg.vector_norm(latent, dim=(1)) + 0.0000000001)[:,None]
            normalized_latent = latent / latent_vector_magnitude

            mean = torch.mean(latent_vector_magnitude, dim=(1,2,3), keepdim=True)
            std = torch.std(latent_vector_magnitude, dim=(1,2,3), keepdim=True)

            top = (std * 5 + mean) * multiplier

            #reinhard
            latent_vector_magnitude *= (1.0 / top)
            new_magnitude = latent_vector_magnitude / (latent_vector_magnitude + 1.0)
            new_magnitude *= top

            return normalized_latent * new_magnitude
        return (tonemap_reinhard,)
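The operation above works on per-position latent magnitudes: each magnitude m is remapped to T * (m/T) / (m/T + 1) with cap T = (std * 5 + mean) * multiplier, so large magnitudes are compressed toward T instead of clipping. A quick numeric check of that remap:

T = 10.0
for m in (1.0, 10.0, 100.0):
    scaled = m / T
    print(m, T * scaled / (scaled + 1.0))  # ~0.909, 5.0, ~9.09: always below T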
class LatentOperationSharpen:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "sharpen_radius": ("INT", {
                "default": 9,
                "min": 1,
                "max": 31,
                "step": 1
            }),
            "sigma": ("FLOAT", {
                "default": 1.0,
                "min": 0.1,
                "max": 10.0,
                "step": 0.1
            }),
            "alpha": ("FLOAT", {
                "default": 0.1,
                "min": 0.0,
                "max": 5.0,
                "step": 0.01
            }),
        }}

    RETURN_TYPES = ("LATENT_OPERATION",)
    FUNCTION = "op"

    CATEGORY = "latent/advanced/operations"
    EXPERIMENTAL = True

    def op(self, sharpen_radius, sigma, alpha):
        def sharpen(latent, **kwargs):
            luminance = (torch.linalg.vector_norm(latent, dim=(1)) + 1e-6)[:,None]
            normalized_latent = latent / luminance
            channels = latent.shape[1]

            kernel_size = sharpen_radius * 2 + 1
            kernel = comfy_extras.nodes_post_processing.gaussian_kernel(kernel_size, sigma, device=luminance.device)
            center = kernel_size // 2

            kernel *= alpha * -10
            kernel[center, center] = kernel[center, center] - kernel.sum() + 1.0

            padded_image = torch.nn.functional.pad(normalized_latent, (sharpen_radius,sharpen_radius,sharpen_radius,sharpen_radius), 'reflect')
            sharpened = torch.nn.functional.conv2d(padded_image, kernel.repeat(channels, 1, 1).unsqueeze(1), padding=kernel_size // 2, groups=channels)[:,:,sharpen_radius:-sharpen_radius, sharpen_radius:-sharpen_radius]

            return luminance * sharpened
        return (sharpen,)
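The kernel built above is an unsharp-mask style filter: the Gaussian is negated and scaled by alpha, then the center tap is adjusted so the whole kernel sums to 1, which leaves flat regions untouched while boosting edges. A quick check of that normalization, using a flat stand-in for the Gaussian:

import torch

kernel_size, alpha = 5, 0.1
g = torch.ones(kernel_size, kernel_size) / kernel_size**2  # stand-in for the gaussian kernel
g *= alpha * -10
center = kernel_size // 2
g[center, center] = g[center, center] - g.sum() + 1.0
print(float(g.sum()))  # ~1.0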
NODE_CLASS_MAPPINGS = {
    "LatentAdd": LatentAdd,
    "LatentSubtract": LatentSubtract,
@ -152,4 +278,8 @@ NODE_CLASS_MAPPINGS = {
    "LatentInterpolate": LatentInterpolate,
    "LatentBatch": LatentBatch,
    "LatentBatchSeedBehavior": LatentBatchSeedBehavior,
    "LatentApplyOperation": LatentApplyOperation,
    "LatentApplyOperationCFG": LatentApplyOperationCFG,
    "LatentOperationTonemapReinhard": LatentOperationTonemapReinhard,
    "LatentOperationSharpen": LatentOperationSharpen,
}
@ -82,8 +82,8 @@ class LoraSave:
                             "lora_type": (tuple(LORA_TYPES.keys()),),
                             "bias_diff": ("BOOLEAN", {"default": True}),
                             },
                "optional": {"model_diff": ("MODEL",),
                             "text_encoder_diff": ("CLIP",)},
                "optional": {"model_diff": ("MODEL", {"tooltip": "The ModelSubtract output to be converted to a lora."}),
                             "text_encoder_diff": ("CLIP", {"tooltip": "The CLIPSubtract output to be converted to a lora."})},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

@ -113,3 +113,7 @@ class LoraSave:
NODE_CLASS_MAPPINGS = {
    "LoraSave": LoraSave
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LoraSave": "Extract and Save Lora"
}
26
comfy_extras/nodes_mochi.py
Normal file
@ -0,0 +1,26 @@
import nodes
import torch
import comfy.model_management

class EmptyMochiLatentVideo:
    def __init__(self):
        self.device = comfy.model_management.intermediate_device()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "width": ("INT", {"default": 848, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                              "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                              "length": ("INT", {"default": 25, "min": 7, "max": nodes.MAX_RESOLUTION, "step": 6}),
                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "generate"

    CATEGORY = "latent/mochi"

    def generate(self, width, height, length, batch_size=1):
        latent = torch.zeros([batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8], device=self.device)
        return ({"samples":latent}, )

NODE_CLASS_MAPPINGS = {
    "EmptyMochiLatentVideo": EmptyMochiLatentVideo,
}
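The latent shape encodes Mochi's VAE compression: a fixed 12 channels, the frame count compressed roughly 6x via (length - 1) // 6 + 1, and 8x spatial downscaling. For the node defaults:

width, height, length, batch_size = 848, 480, 25, 1
shape = [batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8]
print(shape)  # [1, 12, 5, 60, 106]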
@ -101,10 +101,34 @@ class ModelMergeFlux1(comfy_extras.nodes_model_merging.ModelMergeBlocks):

        return {"required": arg_dict}

class ModelMergeSD35_Large(comfy_extras.nodes_model_merging.ModelMergeBlocks):
    CATEGORY = "advanced/model_merging/model_specific"

    @classmethod
    def INPUT_TYPES(s):
        arg_dict = { "model1": ("MODEL",),
                     "model2": ("MODEL",)}

        argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})

        arg_dict["pos_embed."] = argument
        arg_dict["x_embedder."] = argument
        arg_dict["context_embedder."] = argument
        arg_dict["y_embedder."] = argument
        arg_dict["t_embedder."] = argument

        for i in range(38):
            arg_dict["joint_blocks.{}.".format(i)] = argument

        arg_dict["final_layer."] = argument

        return {"required": arg_dict}

NODE_CLASS_MAPPINGS = {
    "ModelMergeSD1": ModelMergeSD1,
    "ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks
    "ModelMergeSDXL": ModelMergeSDXL,
    "ModelMergeSD3_2B": ModelMergeSD3_2B,
    "ModelMergeFlux1": ModelMergeFlux1,
    "ModelMergeSD35_Large": ModelMergeSD35_Large,
}
@ -3,7 +3,7 @@ import comfy.sd
import comfy.model_management
import nodes
import torch

import re
class TripleCLIPLoader:
    @classmethod
    def INPUT_TYPES(s):

@ -95,11 +95,70 @@ class ControlNetApplySD3(nodes.ControlNetApplyAdvanced):
    CATEGORY = "conditioning/controlnet"
    DEPRECATED = True

class SkipLayerGuidanceSD3:
    '''
    Enhance guidance towards detailed structure by having another set of CFG negative with skipped layers.
    Inspired by Perturbed Attention Guidance (https://arxiv.org/abs/2403.17377)
    Experimental implementation by Dango233@StabilityAI.
    '''
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL", ),
                             "layers": ("STRING", {"default": "7, 8, 9", "multiline": False}),
                             "scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.1}),
                             "start_percent": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 0.15, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "skip_guidance"

    CATEGORY = "advanced/guidance"


    def skip_guidance(self, model, layers, scale, start_percent, end_percent):
        if layers == "" or layers == None:
            return (model, )
        # check if layer is comma separated integers
        def skip(args, extra_args):
            return args

        model_sampling = model.get_model_object("model_sampling")
        sigma_start = model_sampling.percent_to_sigma(start_percent)
        sigma_end = model_sampling.percent_to_sigma(end_percent)

        def post_cfg_function(args):
            model = args["model"]
            cond_pred = args["cond_denoised"]
            cond = args["cond"]
            cfg_result = args["denoised"]
            sigma = args["sigma"]
            x = args["input"]
            model_options = args["model_options"].copy()

            for layer in layers:
                model_options = comfy.model_patcher.set_model_options_patch_replace(model_options, skip, "dit", "double_block", layer)
            model_sampling.percent_to_sigma(start_percent)

            sigma_ = sigma[0].item()
            if scale > 0 and sigma_ >= sigma_end and sigma_ <= sigma_start:
                (slg,) = comfy.samplers.calc_cond_batch(model, [cond], x, sigma, model_options)
                cfg_result = cfg_result + (cond_pred - slg) * scale
            return cfg_result

        layers = re.findall(r'\d+', layers)
        layers = [int(i) for i in layers]
        m = model.clone()
        m.set_model_sampler_post_cfg_function(post_cfg_function)

        return (m, )


NODE_CLASS_MAPPINGS = {
    "TripleCLIPLoader": TripleCLIPLoader,
    "EmptySD3LatentImage": EmptySD3LatentImage,
    "CLIPTextEncodeSD3": CLIPTextEncodeSD3,
    "ControlNetApplySD3": ControlNetApplySD3,
    "SkipLayerGuidanceSD3": SkipLayerGuidanceSD3,
}

NODE_DISPLAY_NAME_MAPPINGS = {
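The heart of SkipLayerGuidanceSD3 is the extra term added after regular CFG: a second conditional pass runs with the listed double blocks replaced by the identity skip() patch, and the final prediction is pushed away from it by scale. A shape-only sketch of that mix with random tensors standing in for model outputs:

import torch

scale = 3.0
cond_pred = torch.randn(1, 16, 64, 64)   # normal conditional prediction
slg = torch.randn(1, 16, 64, 64)         # prediction with layers 7, 8, 9 skipped
cfg_result = torch.randn(1, 16, 64, 64)  # output of regular CFG
out = cfg_result + (cond_pred - slg) * scale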
18
nodes.py
@ -281,7 +281,10 @@ class VAEDecode:
    DESCRIPTION = "Decodes latent images back into pixel space images."

    def decode(self, vae, samples):
        return (vae.decode(samples["samples"]), )
        images = vae.decode(samples["samples"])
        if len(images.shape) == 5: #Combine batches
            images = images.reshape(-1, images.shape[-3], images.shape[-2], images.shape[-1])
        return (images, )

class VAEDecodeTiled:
    @classmethod

@ -886,7 +889,7 @@ class CLIPLoader:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
                              "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio"], ),
                              "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi"], ),
                              }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

@ -900,6 +903,8 @@ class CLIPLoader:
            clip_type = comfy.sd.CLIPType.SD3
        elif type == "stable_audio":
            clip_type = comfy.sd.CLIPType.STABLE_AUDIO
        elif type == "mochi":
            clip_type = comfy.sd.CLIPType.MOCHI
        else:
            clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION

@ -1179,10 +1184,10 @@ class LatentUpscale:

        if width == 0:
            height = max(64, height)
            width = max(64, round(samples["samples"].shape[3] * height / samples["samples"].shape[2]))
            width = max(64, round(samples["samples"].shape[-1] * height / samples["samples"].shape[-2]))
        elif height == 0:
            width = max(64, width)
            height = max(64, round(samples["samples"].shape[2] * width / samples["samples"].shape[3]))
            height = max(64, round(samples["samples"].shape[-2] * width / samples["samples"].shape[-1]))
        else:
            width = max(64, width)
            height = max(64, height)

@ -1204,8 +1209,8 @@ class LatentUpscaleBy:

    def upscale(self, samples, upscale_method, scale_by):
        s = samples.copy()
        width = round(samples["samples"].shape[3] * scale_by)
        height = round(samples["samples"].shape[2] * scale_by)
        width = round(samples["samples"].shape[-1] * scale_by)
        height = round(samples["samples"].shape[-2] * scale_by)
        s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, "disabled")
        return (s,)

@ -2111,6 +2116,7 @@ def init_builtin_extra_nodes():
        "nodes_flux.py",
        "nodes_lora_extract.py",
        "nodes_torch_compile.py",
        "nodes_mochi.py",
    ]

    import_failed = []
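The LatentUpscale / LatentUpscaleBy changes switch from shape[2]/shape[3] to shape[-2]/shape[-1] so the same code handles both NCHW image latents and NCTHW video latents: the height and width axes are always the last two. Illustrative shapes:

import torch

img = torch.zeros(1, 4, 64, 96)       # N, C, H, W
vid = torch.zeros(1, 12, 5, 60, 106)  # N, C, T, H, W
for t in (img, vid):
    print(t.shape[-2], t.shape[-1])    # height, width regardless of rank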
@ -40,7 +40,7 @@ class BinaryEventTypes:

async def send_socket_catch_exception(function, message):
    try:
        await function(message)
    except (aiohttp.ClientError, aiohttp.ClientPayloadError, ConnectionResetError) as err:
    except (aiohttp.ClientError, aiohttp.ClientPayloadError, ConnectionResetError, BrokenPipeError, ConnectionError) as err:
        logging.warning("send error: {}".format(err))

def get_comfyui_version():
103
web/assets/ExtensionPanel-DZLYjWBj.js
generated
vendored
Normal file
@ -0,0 +1,103 @@
Compiled Vue component for the new Extensions settings panel: a DataTable listing registered extensions with a ToggleSwitch per row, a computed list of changed entries, an updateExtensionStatus handler that writes the disabled set to the "Comfy.Extension.Disabled" setting, and a "reload to apply changes" button.
1
web/assets/ExtensionPanel-DZLYjWBj.js.map
generated
vendored
Normal file
@ -0,0 +1 @@
Generated source map for the ExtensionPanel bundle, embedding the original ExtensionPanel.vue single-file component source.
792
web/assets/GraphView-BGt8GmeB.css
generated
vendored
@ -1,792 +0,0 @@
Old generated GraphView stylesheet, deleted: scoped styles for the editable-text widget, sidebar buttons and panels, node search box and preview, node tooltip, queue/task result previews, node and model library trees, action bar and ComfyUI menu.
278
web/assets/GraphView-Bx1-rDWO.css
generated
vendored
Normal file
@ -0,0 +1,278 @@
Regenerated GraphView stylesheet with updated scope ids: the same editable-text, sidebar, node search, tooltip, button group, toggle button, action bar and menu rules, without the node preview, tree explorer and model library blocks present in the old file.
17465
web/assets/GraphView-CVV2XJjS.js
generated
vendored
File diff suppressed because one or more lines are too long
1
web/assets/GraphView-CVV2XJjS.js.map
generated
vendored
File diff suppressed because one or more lines are too long
7361
web/assets/GraphView-DmeOoKWv.js
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1
web/assets/GraphView-DmeOoKWv.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
8
web/assets/KeybindingPanel-BNYKhW1k.css
generated
vendored
Normal file
@ -0,0 +1,8 @@

[data-v-e5724e4d] .p-datatable-tbody > tr > td {
padding: 1px;
min-height: 2rem;
}
[data-v-e5724e4d] .p-datatable-row-selected .actions,[data-v-e5724e4d] .p-datatable-selectable-row:hover .actions {
visibility: visible;
}
264
web/assets/KeybindingPanel-YkUFoiMw.js
generated
vendored
Normal file
@ -0,0 +1,264 @@
|
||||
var __defProp = Object.defineProperty;
|
||||
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
|
||||
import { d as defineComponent, q as computed, g as openBlock, h as createElementBlock, M as Fragment, N as renderList, i as createVNode, y as withCtx, ap as createTextVNode, am as toDisplayString, z as unref, at as script, j as createCommentVNode, r as ref, bH as FilterMatchMode, K as useKeybindingStore, F as useCommandStore, aC as watchEffect, aZ as useToast, t as resolveDirective, bI as SearchBox, A as createBaseVNode, D as script$2, x as createBlock, af as script$4, b2 as withModifiers, aA as script$6, v as withDirectives, P as pushScopeId, Q as popScopeId, by as KeyComboImpl, bJ as KeybindingImpl, _ as _export_sfc } from "./index-CgU1oKZt.js";
|
||||
import { s as script$1, a as script$3, b as script$5 } from "./index-DBWDcZsl.js";
|
||||
import "./index-DYEEBf64.js";
|
||||
const _hoisted_1$1 = {
|
||||
key: 0,
|
||||
class: "px-2"
|
||||
};
|
||||
const _sfc_main$1 = /* @__PURE__ */ defineComponent({
|
||||
__name: "KeyComboDisplay",
|
||||
props: {
|
||||
keyCombo: {},
|
||||
isModified: { type: Boolean, default: false }
|
||||
},
|
||||
setup(__props) {
|
||||
const props = __props;
|
||||
const keySequences = computed(() => props.keyCombo.getKeySequences());
|
||||
return (_ctx, _cache) => {
|
||||
return openBlock(), createElementBlock("span", null, [
|
||||
(openBlock(true), createElementBlock(Fragment, null, renderList(keySequences.value, (sequence, index) => {
|
||||
return openBlock(), createElementBlock(Fragment, { key: index }, [
|
||||
createVNode(unref(script), {
|
||||
severity: _ctx.isModified ? "info" : "secondary"
|
||||
}, {
|
||||
default: withCtx(() => [
|
||||
createTextVNode(toDisplayString(sequence), 1)
|
||||
]),
|
||||
_: 2
|
||||
}, 1032, ["severity"]),
|
||||
index < keySequences.value.length - 1 ? (openBlock(), createElementBlock("span", _hoisted_1$1, "+")) : createCommentVNode("", true)
|
||||
], 64);
|
||||
}), 128))
|
||||
]);
|
||||
};
|
||||
}
|
||||
});
|
||||
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-e5724e4d"), n = n(), popScopeId(), n), "_withScopeId");
|
||||
const _hoisted_1 = { class: "keybinding-panel" };
|
||||
const _hoisted_2 = { class: "actions invisible" };
|
||||
const _hoisted_3 = { key: 1 };
|
||||
const _sfc_main = /* @__PURE__ */ defineComponent({
|
||||
__name: "KeybindingPanel",
|
||||
setup(__props) {
|
||||
const filters = ref({
|
||||
global: { value: "", matchMode: FilterMatchMode.CONTAINS }
|
||||
});
|
||||
const keybindingStore = useKeybindingStore();
|
||||
const commandStore = useCommandStore();
|
||||
const commandsData = computed(() => {
|
||||
return Object.values(commandStore.commands).map((command) => ({
|
||||
id: command.id,
|
||||
keybinding: keybindingStore.getKeybindingByCommandId(command.id)
|
||||
}));
|
||||
});
|
||||
const selectedCommandData = ref(null);
|
||||
const editDialogVisible = ref(false);
|
||||
const newBindingKeyCombo = ref(null);
|
||||
const currentEditingCommand = ref(null);
|
||||
const keybindingInput = ref(null);
|
||||
const existingKeybindingOnCombo = computed(() => {
|
||||
if (!currentEditingCommand.value) {
|
||||
return null;
|
||||
}
|
||||
if (currentEditingCommand.value.keybinding?.combo?.equals(
|
||||
newBindingKeyCombo.value
|
||||
)) {
|
||||
return null;
|
||||
}
|
||||
if (!newBindingKeyCombo.value) {
|
||||
return null;
|
||||
}
|
||||
return keybindingStore.getKeybinding(newBindingKeyCombo.value);
|
||||
});
|
||||
function editKeybinding(commandData) {
|
||||
currentEditingCommand.value = commandData;
|
||||
newBindingKeyCombo.value = commandData.keybinding ? commandData.keybinding.combo : null;
|
||||
editDialogVisible.value = true;
|
||||
}
|
||||
__name(editKeybinding, "editKeybinding");
|
||||
watchEffect(() => {
|
||||
if (editDialogVisible.value) {
|
||||
setTimeout(() => {
|
||||
keybindingInput.value?.$el?.focus();
|
||||
}, 300);
|
||||
}
|
||||
});
|
||||
function removeKeybinding(commandData) {
|
||||
if (commandData.keybinding) {
|
||||
keybindingStore.unsetKeybinding(commandData.keybinding);
|
||||
keybindingStore.persistUserKeybindings();
|
||||
}
|
||||
}
|
||||
__name(removeKeybinding, "removeKeybinding");
|
||||
function captureKeybinding(event) {
|
||||
const keyCombo = KeyComboImpl.fromEvent(event);
|
||||
newBindingKeyCombo.value = keyCombo;
|
||||
}
|
||||
__name(captureKeybinding, "captureKeybinding");
|
||||
function cancelEdit() {
|
||||
editDialogVisible.value = false;
|
||||
currentEditingCommand.value = null;
|
||||
newBindingKeyCombo.value = null;
|
||||
}
|
||||
__name(cancelEdit, "cancelEdit");
|
||||
function saveKeybinding() {
|
||||
if (currentEditingCommand.value && newBindingKeyCombo.value) {
|
||||
const updated = keybindingStore.updateKeybindingOnCommand(
|
||||
new KeybindingImpl({
|
||||
commandId: currentEditingCommand.value.id,
|
||||
combo: newBindingKeyCombo.value
|
||||
})
|
||||
);
|
||||
if (updated) {
|
||||
keybindingStore.persistUserKeybindings();
|
||||
}
|
||||
}
|
||||
cancelEdit();
|
||||
}
|
||||
__name(saveKeybinding, "saveKeybinding");
|
||||
const toast = useToast();
|
||||
async function resetKeybindings() {
|
||||
keybindingStore.resetKeybindings();
|
||||
await keybindingStore.persistUserKeybindings();
|
||||
toast.add({
|
||||
severity: "info",
|
||||
summary: "Info",
|
||||
detail: "Keybindings reset",
|
||||
life: 3e3
|
||||
});
|
||||
}
|
||||
__name(resetKeybindings, "resetKeybindings");
|
||||
return (_ctx, _cache) => {
|
||||
const _directive_tooltip = resolveDirective("tooltip");
|
||||
return openBlock(), createElementBlock("div", _hoisted_1, [
|
||||
createVNode(unref(script$3), {
|
||||
value: commandsData.value,
|
||||
selection: selectedCommandData.value,
|
||||
"onUpdate:selection": _cache[1] || (_cache[1] = ($event) => selectedCommandData.value = $event),
|
||||
"global-filter-fields": ["id"],
|
||||
filters: filters.value,
|
||||
selectionMode: "single",
|
||||
stripedRows: "",
|
||||
pt: {
|
||||
header: "px-0"
|
||||
}
|
||||
}, {
|
||||
header: withCtx(() => [
|
||||
createVNode(SearchBox, {
|
||||
modelValue: filters.value["global"].value,
|
||||
"onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => filters.value["global"].value = $event),
|
||||
placeholder: _ctx.$t("searchKeybindings") + "..."
|
||||
}, null, 8, ["modelValue", "placeholder"])
|
||||
]),
|
||||
default: withCtx(() => [
|
||||
createVNode(unref(script$1), {
|
||||
field: "actions",
|
||||
header: ""
|
||||
}, {
|
||||
body: withCtx((slotProps) => [
|
||||
createBaseVNode("div", _hoisted_2, [
|
||||
createVNode(unref(script$2), {
|
||||
icon: "pi pi-pencil",
|
||||
class: "p-button-text",
|
||||
onClick: /* @__PURE__ */ __name(($event) => editKeybinding(slotProps.data), "onClick")
|
||||
}, null, 8, ["onClick"]),
|
||||
createVNode(unref(script$2), {
|
||||
icon: "pi pi-trash",
|
||||
class: "p-button-text p-button-danger",
|
||||
onClick: /* @__PURE__ */ __name(($event) => removeKeybinding(slotProps.data), "onClick"),
|
||||
disabled: !slotProps.data.keybinding
|
||||
}, null, 8, ["onClick", "disabled"])
|
||||
])
|
||||
]),
|
||||
_: 1
|
||||
}),
|
||||
createVNode(unref(script$1), {
|
||||
field: "id",
|
||||
header: "Command ID",
|
||||
sortable: ""
|
||||
}),
|
||||
createVNode(unref(script$1), {
|
||||
field: "keybinding",
|
||||
header: "Keybinding"
|
||||
}, {
|
||||
body: withCtx((slotProps) => [
|
||||
slotProps.data.keybinding ? (openBlock(), createBlock(_sfc_main$1, {
|
||||
key: 0,
|
||||
keyCombo: slotProps.data.keybinding.combo,
|
||||
isModified: unref(keybindingStore).isCommandKeybindingModified(slotProps.data.id)
|
||||
}, null, 8, ["keyCombo", "isModified"])) : (openBlock(), createElementBlock("span", _hoisted_3, "-"))
|
||||
]),
|
||||
_: 1
|
||||
})
|
||||
]),
|
||||
_: 1
|
||||
}, 8, ["value", "selection", "filters"]),
|
||||
createVNode(unref(script$6), {
|
||||
class: "min-w-96",
|
||||
visible: editDialogVisible.value,
|
||||
"onUpdate:visible": _cache[2] || (_cache[2] = ($event) => editDialogVisible.value = $event),
|
||||
modal: "",
|
||||
header: currentEditingCommand.value?.id,
|
||||
onHide: cancelEdit
|
||||
}, {
|
||||
footer: withCtx(() => [
|
||||
createVNode(unref(script$2), {
|
||||
label: "Save",
|
||||
icon: "pi pi-check",
|
||||
onClick: saveKeybinding,
|
||||
disabled: !!existingKeybindingOnCombo.value,
|
||||
autofocus: ""
|
||||
}, null, 8, ["disabled"])
|
||||
]),
|
||||
default: withCtx(() => [
|
||||
createBaseVNode("div", null, [
|
||||
createVNode(unref(script$4), {
|
||||
class: "mb-2 text-center",
|
||||
ref_key: "keybindingInput",
|
||||
ref: keybindingInput,
|
||||
modelValue: newBindingKeyCombo.value?.toString() ?? "",
|
||||
placeholder: "Press keys for new binding",
|
||||
onKeydown: withModifiers(captureKeybinding, ["stop", "prevent"]),
|
||||
autocomplete: "off",
|
||||
fluid: "",
|
||||
invalid: !!existingKeybindingOnCombo.value
|
||||
}, null, 8, ["modelValue", "invalid"]),
|
||||
existingKeybindingOnCombo.value ? (openBlock(), createBlock(unref(script$5), {
|
||||
key: 0,
|
||||
severity: "error"
|
||||
}, {
|
||||
default: withCtx(() => [
|
||||
createTextVNode(" Keybinding already exists on "),
|
||||
createVNode(unref(script), {
|
||||
severity: "secondary",
|
||||
value: existingKeybindingOnCombo.value.commandId
|
||||
}, null, 8, ["value"])
|
||||
]),
|
||||
_: 1
|
||||
})) : createCommentVNode("", true)
|
||||
])
|
||||
]),
|
||||
_: 1
|
||||
}, 8, ["visible", "header"]),
|
||||
withDirectives(createVNode(unref(script$2), {
|
||||
class: "mt-4",
|
||||
label: _ctx.$t("reset"),
|
||||
icon: "pi pi-trash",
|
||||
severity: "danger",
|
||||
fluid: "",
|
||||
text: "",
|
||||
onClick: resetKeybindings
|
||||
}, null, 8, ["label"]), [
|
||||
[_directive_tooltip, _ctx.$t("resetKeybindingsTooltip")]
|
||||
])
|
||||
]);
|
||||
};
|
||||
}
|
||||
});
|
||||
const KeybindingPanel = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-e5724e4d"]]);
|
||||
export {
|
||||
KeybindingPanel as default
|
||||
};
|
||||
//# sourceMappingURL=KeybindingPanel-YkUFoiMw.js.map
|
1
web/assets/KeybindingPanel-YkUFoiMw.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
865
web/assets/colorPalette-D5oi2-2V.js
generated
vendored
@ -1,865 +0,0 @@
|
||||
var __defProp = Object.defineProperty;
|
||||
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
|
||||
import { k as app, aP as LGraphCanvas, bO as useToastStore, ca as $el, z as LiteGraph } from "./index-DGAbdBYF.js";
|
||||
const colorPalettes = {
|
||||
dark: {
|
||||
id: "dark",
|
||||
name: "Dark (Default)",
|
||||
colors: {
|
||||
node_slot: {
|
||||
CLIP: "#FFD500",
|
||||
// bright yellow
|
||||
CLIP_VISION: "#A8DADC",
|
||||
// light blue-gray
|
||||
CLIP_VISION_OUTPUT: "#ad7452",
|
||||
// rusty brown-orange
|
||||
CONDITIONING: "#FFA931",
|
||||
// vibrant orange-yellow
|
||||
CONTROL_NET: "#6EE7B7",
|
||||
// soft mint green
|
||||
IMAGE: "#64B5F6",
|
||||
// bright sky blue
|
||||
LATENT: "#FF9CF9",
|
||||
// light pink-purple
|
||||
MASK: "#81C784",
|
||||
// muted green
|
||||
MODEL: "#B39DDB",
|
||||
// light lavender-purple
|
||||
STYLE_MODEL: "#C2FFAE",
|
||||
// light green-yellow
|
||||
VAE: "#FF6E6E",
|
||||
// bright red
|
||||
NOISE: "#B0B0B0",
|
||||
// gray
|
||||
GUIDER: "#66FFFF",
|
||||
// cyan
|
||||
SAMPLER: "#ECB4B4",
|
||||
// very soft red
|
||||
SIGMAS: "#CDFFCD",
|
||||
// soft lime green
|
||||
TAESD: "#DCC274"
|
||||
// cheesecake
|
||||
},
|
||||
litegraph_base: {
|
||||
BACKGROUND_IMAGE: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAQBJREFUeNrs1rEKwjAUhlETUkj3vP9rdmr1Ysammk2w5wdxuLgcMHyptfawuZX4pJSWZTnfnu/lnIe/jNNxHHGNn//HNbbv+4dr6V+11uF527arU7+u63qfa/bnmh8sWLBgwYJlqRf8MEptXPBXJXa37BSl3ixYsGDBMliwFLyCV/DeLIMFCxYsWLBMwSt4Be/NggXLYMGCBUvBK3iNruC9WbBgwYJlsGApeAWv4L1ZBgsWLFiwYJmCV/AK3psFC5bBggULloJX8BpdwXuzYMGCBctgwVLwCl7Be7MMFixYsGDBsu8FH1FaSmExVfAxBa/gvVmwYMGCZbBg/W4vAQYA5tRF9QYlv/QAAAAASUVORK5CYII=",
|
||||
CLEAR_BACKGROUND_COLOR: "#222",
|
||||
NODE_TITLE_COLOR: "#999",
|
||||
NODE_SELECTED_TITLE_COLOR: "#FFF",
|
||||
NODE_TEXT_SIZE: 14,
|
||||
NODE_TEXT_COLOR: "#AAA",
|
||||
NODE_SUBTEXT_SIZE: 12,
|
||||
NODE_DEFAULT_COLOR: "#333",
|
||||
NODE_DEFAULT_BGCOLOR: "#353535",
|
||||
NODE_DEFAULT_BOXCOLOR: "#666",
|
||||
NODE_DEFAULT_SHAPE: "box",
|
||||
NODE_BOX_OUTLINE_COLOR: "#FFF",
|
||||
NODE_BYPASS_BGCOLOR: "#FF00FF",
|
||||
DEFAULT_SHADOW_COLOR: "rgba(0,0,0,0.5)",
|
||||
DEFAULT_GROUP_FONT: 24,
|
||||
WIDGET_BGCOLOR: "#222",
|
||||
WIDGET_OUTLINE_COLOR: "#666",
|
||||
WIDGET_TEXT_COLOR: "#DDD",
|
||||
WIDGET_SECONDARY_TEXT_COLOR: "#999",
|
||||
LINK_COLOR: "#9A9",
|
||||
EVENT_LINK_COLOR: "#A86",
|
||||
CONNECTING_LINK_COLOR: "#AFA",
|
||||
BADGE_FG_COLOR: "#FFF",
|
||||
BADGE_BG_COLOR: "#0F1F0F"
|
||||
},
|
||||
comfy_base: {
|
||||
"fg-color": "#fff",
|
||||
"bg-color": "#202020",
|
||||
"comfy-menu-bg": "#353535",
|
||||
"comfy-input-bg": "#222",
|
||||
"input-text": "#ddd",
|
||||
"descrip-text": "#999",
|
||||
"drag-text": "#ccc",
|
||||
"error-text": "#ff4444",
|
||||
"border-color": "#4e4e4e",
|
||||
"tr-even-bg-color": "#222",
|
||||
"tr-odd-bg-color": "#353535",
|
||||
"content-bg": "#4e4e4e",
|
||||
"content-fg": "#fff",
|
||||
"content-hover-bg": "#222",
|
||||
"content-hover-fg": "#fff"
|
||||
}
|
||||
}
|
||||
},
|
||||
light: {
|
||||
id: "light",
|
||||
name: "Light",
|
||||
colors: {
|
||||
node_slot: {
|
||||
CLIP: "#FFA726",
|
||||
// orange
|
||||
CLIP_VISION: "#5C6BC0",
|
||||
// indigo
|
||||
CLIP_VISION_OUTPUT: "#8D6E63",
|
||||
// brown
|
||||
CONDITIONING: "#EF5350",
|
||||
// red
|
||||
CONTROL_NET: "#66BB6A",
|
||||
// green
|
||||
IMAGE: "#42A5F5",
|
||||
// blue
|
||||
LATENT: "#AB47BC",
|
||||
// purple
|
||||
MASK: "#9CCC65",
|
||||
// light green
|
||||
MODEL: "#7E57C2",
|
||||
// deep purple
|
||||
STYLE_MODEL: "#D4E157",
|
||||
// lime
|
||||
VAE: "#FF7043"
|
||||
// deep orange
|
||||
},
|
||||
litegraph_base: {
|
||||
BACKGROUND_IMAGE: "data:image/gif;base64,R0lGODlhZABkALMAAAAAAP///+vr6+rq6ujo6Ofn5+bm5uXl5d3d3f///wAAAAAAAAAAAAAAAAAAAAAAACH5BAEAAAkALAAAAABkAGQAAAT/UMhJq7046827HkcoHkYxjgZhnGG6si5LqnIM0/fL4qwwIMAg0CAsEovBIxKhRDaNy2GUOX0KfVFrssrNdpdaqTeKBX+dZ+jYvEaTf+y4W66mC8PUdrE879f9d2mBeoNLfH+IhYBbhIx2jkiHiomQlGKPl4uZe3CaeZifnnijgkESBqipqqusra6vsLGys62SlZO4t7qbuby7CLa+wqGWxL3Gv3jByMOkjc2lw8vOoNSi0czAncXW3Njdx9Pf48/Z4Kbbx+fQ5evZ4u3k1fKR6cn03vHlp7T9/v8A/8Gbp4+gwXoFryXMB2qgwoMMHyKEqA5fxX322FG8tzBcRnMW/zlulPbRncmQGidKjMjyYsOSKEF2FBlJQMCbOHP6c9iSZs+UnGYCdbnSo1CZI5F64kn0p1KnTH02nSoV3dGTV7FFHVqVq1dtWcMmVQZTbNGu72zqXMuW7danVL+6e4t1bEy6MeueBYLXrNO5Ze36jQtWsOG97wIj1vt3St/DjTEORss4nNq2mDP3e7w4r1bFkSET5hy6s2TRlD2/mSxXtSHQhCunXo26NevCpmvD/UU6tuullzULH76q92zdZG/Ltv1a+W+osI/nRmyc+fRi1Xdbh+68+0vv10dH3+77KD/i6IdnX669/frn5Zsjh4/2PXju8+8bzc9/6fj27LFnX11/+IUnXWl7BJfegm79FyB9JOl3oHgSklefgxAC+FmFGpqHIYcCfkhgfCohSKKJVo044YUMttggiBkmp6KFXw1oII24oYhjiDByaKOOHcp3Y5BD/njikSkO+eBREQAAOw==",
|
||||
CLEAR_BACKGROUND_COLOR: "lightgray",
|
||||
NODE_TITLE_COLOR: "#222",
|
||||
NODE_SELECTED_TITLE_COLOR: "#000",
|
||||
NODE_TEXT_SIZE: 14,
|
||||
NODE_TEXT_COLOR: "#444",
|
||||
NODE_SUBTEXT_SIZE: 12,
|
||||
NODE_DEFAULT_COLOR: "#F7F7F7",
|
||||
NODE_DEFAULT_BGCOLOR: "#F5F5F5",
|
||||
NODE_DEFAULT_BOXCOLOR: "#CCC",
|
||||
NODE_DEFAULT_SHAPE: "box",
|
||||
NODE_BOX_OUTLINE_COLOR: "#000",
|
||||
NODE_BYPASS_BGCOLOR: "#FF00FF",
|
||||
DEFAULT_SHADOW_COLOR: "rgba(0,0,0,0.1)",
|
||||
DEFAULT_GROUP_FONT: 24,
|
||||
WIDGET_BGCOLOR: "#D4D4D4",
|
||||
WIDGET_OUTLINE_COLOR: "#999",
|
||||
WIDGET_TEXT_COLOR: "#222",
|
||||
WIDGET_SECONDARY_TEXT_COLOR: "#555",
|
||||
LINK_COLOR: "#4CAF50",
|
||||
EVENT_LINK_COLOR: "#FF9800",
|
||||
CONNECTING_LINK_COLOR: "#2196F3",
|
||||
BADGE_FG_COLOR: "#000",
|
||||
BADGE_BG_COLOR: "#FFF"
|
||||
},
|
||||
comfy_base: {
|
||||
"fg-color": "#222",
|
||||
"bg-color": "#DDD",
|
||||
"comfy-menu-bg": "#F5F5F5",
|
||||
"comfy-input-bg": "#C9C9C9",
|
||||
"input-text": "#222",
|
||||
"descrip-text": "#444",
|
||||
"drag-text": "#555",
|
||||
"error-text": "#F44336",
|
||||
"border-color": "#888",
|
||||
"tr-even-bg-color": "#f9f9f9",
|
||||
"tr-odd-bg-color": "#fff",
|
||||
"content-bg": "#e0e0e0",
|
||||
"content-fg": "#222",
|
||||
"content-hover-bg": "#adadad",
|
||||
"content-hover-fg": "#222"
|
||||
}
|
||||
}
|
||||
},
|
||||
solarized: {
|
||||
id: "solarized",
|
||||
name: "Solarized",
|
||||
colors: {
|
||||
node_slot: {
|
||||
CLIP: "#2AB7CA",
|
||||
// light blue
|
||||
CLIP_VISION: "#6c71c4",
|
||||
// blue violet
|
||||
CLIP_VISION_OUTPUT: "#859900",
|
||||
// olive green
|
||||
CONDITIONING: "#d33682",
|
||||
// magenta
|
||||
CONTROL_NET: "#d1ffd7",
|
||||
// light mint green
|
||||
IMAGE: "#5940bb",
|
||||
// deep blue violet
|
||||
LATENT: "#268bd2",
|
||||
// blue
|
||||
MASK: "#CCC9E7",
|
||||
// light purple-gray
|
||||
MODEL: "#dc322f",
|
||||
// red
|
||||
STYLE_MODEL: "#1a998a",
|
||||
// teal
|
||||
UPSCALE_MODEL: "#054A29",
|
||||
// dark green
|
||||
VAE: "#facfad"
|
||||
// light pink-orange
|
||||
},
|
||||
litegraph_base: {
|
||||
NODE_TITLE_COLOR: "#fdf6e3",
|
||||
// Base3
|
||||
NODE_SELECTED_TITLE_COLOR: "#A9D400",
|
||||
NODE_TEXT_SIZE: 14,
|
||||
NODE_TEXT_COLOR: "#657b83",
|
||||
// Base00
|
||||
NODE_SUBTEXT_SIZE: 12,
|
||||
NODE_DEFAULT_COLOR: "#094656",
|
||||
NODE_DEFAULT_BGCOLOR: "#073642",
|
||||
// Base02
|
||||
NODE_DEFAULT_BOXCOLOR: "#839496",
|
||||
// Base0
|
||||
NODE_DEFAULT_SHAPE: "box",
|
||||
NODE_BOX_OUTLINE_COLOR: "#fdf6e3",
|
||||
// Base3
|
||||
NODE_BYPASS_BGCOLOR: "#FF00FF",
|
||||
DEFAULT_SHADOW_COLOR: "rgba(0,0,0,0.5)",
|
||||
DEFAULT_GROUP_FONT: 24,
|
||||
WIDGET_BGCOLOR: "#002b36",
|
||||
// Base03
|
||||
WIDGET_OUTLINE_COLOR: "#839496",
|
||||
// Base0
|
||||
WIDGET_TEXT_COLOR: "#fdf6e3",
|
||||
// Base3
|
||||
WIDGET_SECONDARY_TEXT_COLOR: "#93a1a1",
|
||||
// Base1
|
||||
LINK_COLOR: "#2aa198",
|
||||
// Solarized Cyan
|
||||
EVENT_LINK_COLOR: "#268bd2",
|
||||
// Solarized Blue
|
||||
CONNECTING_LINK_COLOR: "#859900"
|
||||
// Solarized Green
|
||||
},
|
||||
comfy_base: {
|
||||
"fg-color": "#fdf6e3",
|
||||
// Base3
|
||||
"bg-color": "#002b36",
|
||||
// Base03
|
||||
"comfy-menu-bg": "#073642",
|
||||
// Base02
|
||||
"comfy-input-bg": "#002b36",
|
||||
// Base03
|
||||
"input-text": "#93a1a1",
|
||||
// Base1
|
||||
"descrip-text": "#586e75",
|
||||
// Base01
|
||||
"drag-text": "#839496",
|
||||
// Base0
|
||||
"error-text": "#dc322f",
|
||||
// Solarized Red
|
||||
"border-color": "#657b83",
|
||||
// Base00
|
||||
"tr-even-bg-color": "#002b36",
|
||||
"tr-odd-bg-color": "#073642",
|
||||
"content-bg": "#657b83",
|
||||
"content-fg": "#fdf6e3",
|
||||
"content-hover-bg": "#002b36",
|
||||
"content-hover-fg": "#fdf6e3"
|
||||
}
|
||||
}
|
||||
},
|
||||
arc: {
|
||||
id: "arc",
|
||||
name: "Arc",
|
||||
colors: {
|
||||
node_slot: {
|
||||
BOOLEAN: "",
|
||||
CLIP: "#eacb8b",
|
||||
CLIP_VISION: "#A8DADC",
|
||||
CLIP_VISION_OUTPUT: "#ad7452",
|
||||
CONDITIONING: "#cf876f",
|
||||
CONTROL_NET: "#00d78d",
|
||||
CONTROL_NET_WEIGHTS: "",
|
||||
FLOAT: "",
|
||||
GLIGEN: "",
|
||||
IMAGE: "#80a1c0",
|
||||
IMAGEUPLOAD: "",
|
||||
INT: "",
|
||||
LATENT: "#b38ead",
|
||||
LATENT_KEYFRAME: "",
|
||||
MASK: "#a3bd8d",
|
||||
MODEL: "#8978a7",
|
||||
SAMPLER: "",
|
||||
SIGMAS: "",
|
||||
STRING: "",
|
||||
STYLE_MODEL: "#C2FFAE",
|
||||
T2I_ADAPTER_WEIGHTS: "",
|
||||
TAESD: "#DCC274",
|
||||
TIMESTEP_KEYFRAME: "",
|
||||
UPSCALE_MODEL: "",
|
||||
VAE: "#be616b"
|
||||
},
|
||||
litegraph_base: {
|
||||
BACKGROUND_IMAGE: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAACXBIWXMAAAsTAAALEwEAmpwYAAABcklEQVR4nO3YMUoDARgF4RfxBqZI6/0vZqFn0MYtrLIQMFN8U6V4LAtD+Jm9XG/v30OGl2e/AP7yevz4+vx45nvgF/+QGITEICQGITEIiUFIjNNC3q43u3/YnRJyPOzeQ+0e220nhRzReC8e7R7bbdvl+Jal1Bs46jEIiUFIDEJiEBKDkBhKPbZT6qHdptRTu02p53DUYxASg5AYhMQgJAYhMZR6bKfUQ7tNqad2m1LP4ajHICQGITEIiUFIDEJiKPXYTqmHdptST+02pZ7DUY9BSAxCYhASg5AYhMRQ6rGdUg/tNqWe2m1KPYejHoOQGITEICQGITEIiaHUYzulHtptSj2125R6Dkc9BiExCIlBSAxCYhASQ6nHdko9tNuUemq3KfUcjnoMQmIQEoOQGITEICSGUo/tlHpotyn11G5T6jkc9RiExCAkBiExCIlBSAylHtsp9dBuU+qp3abUczjqMQiJQUgMQmIQEoOQGITE+AHFISNQrFTGuwAAAABJRU5ErkJggg==",
|
||||
CLEAR_BACKGROUND_COLOR: "#2b2f38",
|
||||
NODE_TITLE_COLOR: "#b2b7bd",
|
||||
NODE_SELECTED_TITLE_COLOR: "#FFF",
|
||||
NODE_TEXT_SIZE: 14,
|
||||
NODE_TEXT_COLOR: "#AAA",
|
||||
NODE_SUBTEXT_SIZE: 12,
|
||||
NODE_DEFAULT_COLOR: "#2b2f38",
|
||||
NODE_DEFAULT_BGCOLOR: "#242730",
|
||||
NODE_DEFAULT_BOXCOLOR: "#6e7581",
|
||||
NODE_DEFAULT_SHAPE: "box",
|
||||
NODE_BOX_OUTLINE_COLOR: "#FFF",
|
||||
NODE_BYPASS_BGCOLOR: "#FF00FF",
|
||||
DEFAULT_SHADOW_COLOR: "rgba(0,0,0,0.5)",
|
||||
DEFAULT_GROUP_FONT: 22,
|
||||
WIDGET_BGCOLOR: "#2b2f38",
|
||||
WIDGET_OUTLINE_COLOR: "#6e7581",
|
||||
WIDGET_TEXT_COLOR: "#DDD",
|
||||
WIDGET_SECONDARY_TEXT_COLOR: "#b2b7bd",
|
||||
LINK_COLOR: "#9A9",
|
||||
EVENT_LINK_COLOR: "#A86",
|
||||
CONNECTING_LINK_COLOR: "#AFA"
|
||||
},
|
||||
comfy_base: {
|
||||
"fg-color": "#fff",
|
||||
"bg-color": "#2b2f38",
|
||||
"comfy-menu-bg": "#242730",
|
||||
"comfy-input-bg": "#2b2f38",
|
||||
"input-text": "#ddd",
|
||||
"descrip-text": "#b2b7bd",
|
||||
"drag-text": "#ccc",
|
||||
"error-text": "#ff4444",
|
||||
"border-color": "#6e7581",
|
||||
"tr-even-bg-color": "#2b2f38",
|
||||
"tr-odd-bg-color": "#242730",
|
||||
"content-bg": "#6e7581",
|
||||
"content-fg": "#fff",
|
||||
"content-hover-bg": "#2b2f38",
|
||||
"content-hover-fg": "#fff"
|
||||
}
|
||||
}
|
||||
},
|
||||
nord: {
|
||||
id: "nord",
|
||||
name: "Nord",
|
||||
colors: {
|
||||
node_slot: {
|
||||
BOOLEAN: "",
|
||||
CLIP: "#eacb8b",
|
||||
CLIP_VISION: "#A8DADC",
|
||||
CLIP_VISION_OUTPUT: "#ad7452",
|
||||
CONDITIONING: "#cf876f",
|
||||
CONTROL_NET: "#00d78d",
|
||||
CONTROL_NET_WEIGHTS: "",
|
||||
FLOAT: "",
|
||||
GLIGEN: "",
|
||||
IMAGE: "#80a1c0",
|
||||
IMAGEUPLOAD: "",
|
||||
INT: "",
|
||||
LATENT: "#b38ead",
|
||||
LATENT_KEYFRAME: "",
|
||||
MASK: "#a3bd8d",
|
||||
MODEL: "#8978a7",
|
||||
SAMPLER: "",
|
||||
SIGMAS: "",
|
||||
STRING: "",
|
||||
STYLE_MODEL: "#C2FFAE",
|
||||
T2I_ADAPTER_WEIGHTS: "",
|
||||
TAESD: "#DCC274",
|
||||
TIMESTEP_KEYFRAME: "",
|
||||
UPSCALE_MODEL: "",
|
||||
VAE: "#be616b"
|
||||
},
|
||||
litegraph_base: {
|
||||
BACKGROUND_IMAGE: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAAACXBIWXMAAAsTAAALEwEAmpwYAAAFu2lUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgOS4xLWMwMDEgNzkuMTQ2Mjg5OSwgMjAyMy8wNi8yNS0yMDowMTo1NSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiB4bXA6Q3JlYXRlRGF0ZT0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgeG1wOk1vZGlmeURhdGU9IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIHhtcDpNZXRhZGF0YURhdGU9IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIGRjOmZvcm1hdD0iaW1hZ2UvcG5nIiBwaG90b3Nob3A6Q29sb3JNb2RlPSIzIiB4bXBNTTpJbnN0YW5jZUlEPSJ4bXAuaWlkOjUwNDFhMmZjLTEzNzQtMTk0ZC1hZWY4LTYxMzM1MTVmNjUwMCIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDoyMzFiMTBiMC1iNGZiLTAyNGUtYjEyZS0zMDUzMDNjZDA3YzgiIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDoyMzFiMTBiMC1iNGZiLTAyNGUtYjEyZS0zMDUzMDNjZDA3YzgiPiA8eG1wTU06SGlzdG9yeT4gPHJkZjpTZXE+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJjcmVhdGVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjIzMWIxMGIwLWI0ZmItMDI0ZS1iMTJlLTMwNTMwM2NkMDdjOCIgc3RFdnQ6d2hlbj0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDo1MDQxYTJmYy0xMzc0LTE5NGQtYWVmOC02MTMzNTE1ZjY1MDAiIHN0RXZ0OndoZW49IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyNS4xIChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz73jWg/AAAAyUlEQVR42u3WKwoAIBRFQRdiMb1idv9Lsxn9gEFw4Dbb8JCTojbbXEJwjJVL2HKwYMGCBQuWLbDmjr+9zrBGjHl1WVcvy2DBggULFizTWQpewSt4HzwsgwULFiwFr7MUvMtS8D54WLBgGSxYCl7BK3iXZbBgwYIFC5bpLAWv4BW8Dx6WwYIFC5aC11kK3mUpeB88LFiwDBYsBa/gFbzLMliwYMGCBct0loJX8AreBw/LYMGCBUvB6ywF77IUvA8eFixYBgsWrNfWAZPltufdad+1AAAAAElFTkSuQmCC",
|
||||
CLEAR_BACKGROUND_COLOR: "#212732",
|
||||
NODE_TITLE_COLOR: "#999",
|
||||
NODE_SELECTED_TITLE_COLOR: "#e5eaf0",
|
||||
NODE_TEXT_SIZE: 14,
|
||||
NODE_TEXT_COLOR: "#bcc2c8",
|
||||
NODE_SUBTEXT_SIZE: 12,
|
||||
NODE_DEFAULT_COLOR: "#2e3440",
|
||||
NODE_DEFAULT_BGCOLOR: "#161b22",
|
||||
NODE_DEFAULT_BOXCOLOR: "#545d70",
|
||||
NODE_DEFAULT_SHAPE: "box",
|
||||
NODE_BOX_OUTLINE_COLOR: "#e5eaf0",
|
||||
NODE_BYPASS_BGCOLOR: "#FF00FF",
|
||||
DEFAULT_SHADOW_COLOR: "rgba(0,0,0,0.5)",
|
||||
DEFAULT_GROUP_FONT: 24,
|
||||
WIDGET_BGCOLOR: "#2e3440",
|
||||
WIDGET_OUTLINE_COLOR: "#545d70",
|
||||
WIDGET_TEXT_COLOR: "#bcc2c8",
|
||||
WIDGET_SECONDARY_TEXT_COLOR: "#999",
|
||||
LINK_COLOR: "#9A9",
|
||||
EVENT_LINK_COLOR: "#A86",
|
||||
CONNECTING_LINK_COLOR: "#AFA"
|
||||
},
|
||||
comfy_base: {
|
||||
"fg-color": "#e5eaf0",
|
||||
"bg-color": "#2e3440",
|
||||
"comfy-menu-bg": "#161b22",
|
||||
"comfy-input-bg": "#2e3440",
|
||||
"input-text": "#bcc2c8",
|
||||
"descrip-text": "#999",
|
||||
"drag-text": "#ccc",
|
||||
"error-text": "#ff4444",
|
||||
"border-color": "#545d70",
|
||||
"tr-even-bg-color": "#2e3440",
|
||||
"tr-odd-bg-color": "#161b22",
|
||||
"content-bg": "#545d70",
|
||||
"content-fg": "#e5eaf0",
|
||||
"content-hover-bg": "#2e3440",
|
||||
"content-hover-fg": "#e5eaf0"
|
||||
}
|
||||
}
|
||||
},
|
||||
github: {
|
||||
id: "github",
|
||||
name: "Github",
|
||||
colors: {
|
||||
node_slot: {
|
||||
BOOLEAN: "",
|
||||
CLIP: "#eacb8b",
|
||||
CLIP_VISION: "#A8DADC",
|
||||
CLIP_VISION_OUTPUT: "#ad7452",
|
||||
CONDITIONING: "#cf876f",
|
||||
CONTROL_NET: "#00d78d",
|
||||
CONTROL_NET_WEIGHTS: "",
|
||||
FLOAT: "",
|
||||
GLIGEN: "",
|
||||
IMAGE: "#80a1c0",
|
||||
IMAGEUPLOAD: "",
|
||||
INT: "",
|
||||
LATENT: "#b38ead",
|
||||
LATENT_KEYFRAME: "",
|
||||
MASK: "#a3bd8d",
|
||||
MODEL: "#8978a7",
|
||||
SAMPLER: "",
|
||||
SIGMAS: "",
|
||||
STRING: "",
|
||||
STYLE_MODEL: "#C2FFAE",
|
||||
T2I_ADAPTER_WEIGHTS: "",
|
||||
TAESD: "#DCC274",
|
||||
TIMESTEP_KEYFRAME: "",
|
||||
UPSCALE_MODEL: "",
|
||||
VAE: "#be616b"
|
||||
},
|
||||
litegraph_base: {
|
||||
BACKGROUND_IMAGE: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAAACXBIWXMAAAsTAAALEwEAmpwYAAAGlmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgOS4xLWMwMDEgNzkuMTQ2Mjg5OSwgMjAyMy8wNi8yNS0yMDowMTo1NSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiB4bXA6Q3JlYXRlRGF0ZT0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgeG1wOk1vZGlmeURhdGU9IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIHhtcDpNZXRhZGF0YURhdGU9IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIGRjOmZvcm1hdD0iaW1hZ2UvcG5nIiBwaG90b3Nob3A6Q29sb3JNb2RlPSIzIiB4bXBNTTpJbnN0YW5jZUlEPSJ4bXAuaWlkOmIyYzRhNjA5LWJmYTctYTg0MC1iOGFlLTk3MzE2ZjM1ZGIyNyIgeG1wTU06RG9jdW1lbnRJRD0iYWRvYmU6ZG9jaWQ6cGhvdG9zaG9wOjk0ZmNlZGU4LTE1MTctZmQ0MC04ZGU3LWYzOTgxM2E3ODk5ZiIgeG1wTU06T3JpZ2luYWxEb2N1bWVudElEPSJ4bXAuZGlkOjIzMWIxMGIwLWI0ZmItMDI0ZS1iMTJlLTMwNTMwM2NkMDdjOCI+IDx4bXBNTTpIaXN0b3J5PiA8cmRmOlNlcT4gPHJkZjpsaSBzdEV2dDphY3Rpb249ImNyZWF0ZWQiIHN0RXZ0Omluc3RhbmNlSUQ9InhtcC5paWQ6MjMxYjEwYjAtYjRmYi0wMjRlLWIxMmUtMzA1MzAzY2QwN2M4IiBzdEV2dDp3aGVuPSIyMDIzLTExLTEzVDAwOjE4OjAyKzAxOjAwIiBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZG9iZSBQaG90b3Nob3AgMjUuMSAoV2luZG93cykiLz4gPHJkZjpsaSBzdEV2dDphY3Rpb249InNhdmVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjQ4OWY1NzlmLTJkNjUtZWQ0Zi04OTg0LTA4NGE2MGE1ZTMzNSIgc3RFdnQ6d2hlbj0iMjAyMy0xMS0xNVQwMjowNDo1OSswMTowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiBzdEV2dDpjaGFuZ2VkPSIvIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDpiMmM0YTYwOS1iZmE3LWE4NDAtYjhhZS05NzMxNmYzNWRiMjciIHN0RXZ0OndoZW49IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyNS4xIChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz4OTe6GAAAAx0lEQVR42u3WMQoAIQxFwRzJys77X8vSLiRgITif7bYbgrwYc/mKXyBoY4VVBgsWLFiwYFmOlTv+9jfDOjHmr8u6eVkGCxYsWLBgmc5S8ApewXvgYRksWLBgKXidpeBdloL3wMOCBctgwVLwCl7BuyyDBQsWLFiwTGcpeAWv4D3wsAwWLFiwFLzOUvAuS8F74GHBgmWwYCl4Ba/gXZbBggULFixYprMUvIJX8B54WAYLFixYCl5nKXiXpeA98LBgwTJYsGC9tg1o8f4TTtqzNQAAAABJRU5ErkJggg==",
|
||||
CLEAR_BACKGROUND_COLOR: "#040506",
|
||||
NODE_TITLE_COLOR: "#999",
|
||||
NODE_SELECTED_TITLE_COLOR: "#e5eaf0",
|
||||
NODE_TEXT_SIZE: 14,
|
||||
NODE_TEXT_COLOR: "#bcc2c8",
|
||||
NODE_SUBTEXT_SIZE: 12,
|
||||
NODE_DEFAULT_COLOR: "#161b22",
|
||||
NODE_DEFAULT_BGCOLOR: "#13171d",
|
||||
NODE_DEFAULT_BOXCOLOR: "#30363d",
|
||||
NODE_DEFAULT_SHAPE: "box",
|
||||
NODE_BOX_OUTLINE_COLOR: "#e5eaf0",
|
||||
NODE_BYPASS_BGCOLOR: "#FF00FF",
|
||||
DEFAULT_SHADOW_COLOR: "rgba(0,0,0,0.5)",
|
||||
DEFAULT_GROUP_FONT: 24,
|
||||
WIDGET_BGCOLOR: "#161b22",
|
||||
WIDGET_OUTLINE_COLOR: "#30363d",
|
||||
WIDGET_TEXT_COLOR: "#bcc2c8",
|
||||
WIDGET_SECONDARY_TEXT_COLOR: "#999",
|
||||
LINK_COLOR: "#9A9",
|
||||
EVENT_LINK_COLOR: "#A86",
|
||||
CONNECTING_LINK_COLOR: "#AFA"
|
||||
},
|
||||
comfy_base: {
|
||||
"fg-color": "#e5eaf0",
|
||||
"bg-color": "#161b22",
|
||||
"comfy-menu-bg": "#13171d",
|
||||
"comfy-input-bg": "#161b22",
|
||||
"input-text": "#bcc2c8",
|
||||
"descrip-text": "#999",
|
||||
"drag-text": "#ccc",
|
||||
"error-text": "#ff4444",
|
||||
"border-color": "#30363d",
|
||||
"tr-even-bg-color": "#161b22",
|
||||
"tr-odd-bg-color": "#13171d",
|
||||
"content-bg": "#30363d",
|
||||
"content-fg": "#e5eaf0",
|
||||
"content-hover-bg": "#161b22",
|
||||
"content-hover-fg": "#e5eaf0"
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
const id = "Comfy.ColorPalette";
|
||||
const idCustomColorPalettes = "Comfy.CustomColorPalettes";
|
||||
const defaultColorPaletteId = "dark";
|
||||
const els = {
|
||||
select: null
|
||||
};
|
||||
const getCustomColorPalettes = /* @__PURE__ */ __name(() => {
|
||||
return app.ui.settings.getSettingValue(idCustomColorPalettes, {});
|
||||
}, "getCustomColorPalettes");
|
||||
const setCustomColorPalettes = /* @__PURE__ */ __name((customColorPalettes) => {
|
||||
return app.ui.settings.setSettingValue(
|
||||
idCustomColorPalettes,
|
||||
customColorPalettes
|
||||
);
|
||||
}, "setCustomColorPalettes");
|
||||
const defaultColorPalette = colorPalettes[defaultColorPaletteId];
|
||||
const getColorPalette = /* @__PURE__ */ __name((colorPaletteId) => {
|
||||
if (!colorPaletteId) {
|
||||
colorPaletteId = app.ui.settings.getSettingValue(id, defaultColorPaletteId);
|
||||
}
|
||||
if (colorPaletteId.startsWith("custom_")) {
|
||||
colorPaletteId = colorPaletteId.substr(7);
|
||||
let customColorPalettes = getCustomColorPalettes();
|
||||
if (customColorPalettes[colorPaletteId]) {
|
||||
return customColorPalettes[colorPaletteId];
|
||||
}
|
||||
}
|
||||
return colorPalettes[colorPaletteId];
|
||||
}, "getColorPalette");
|
||||
const setColorPalette = /* @__PURE__ */ __name((colorPaletteId) => {
|
||||
app.ui.settings.setSettingValue(id, colorPaletteId);
|
||||
}, "setColorPalette");
|
||||
app.registerExtension({
|
||||
name: id,
|
||||
init() {
|
||||
LGraphCanvas.prototype.updateBackground = function(image, clearBackgroundColor) {
|
||||
this._bg_img = new Image();
|
||||
this._bg_img.name = image;
|
||||
this._bg_img.src = image;
|
||||
this._bg_img.onload = () => {
|
||||
this.draw(true, true);
|
||||
};
|
||||
this.background_image = image;
|
||||
this.clear_background = true;
|
||||
this.clear_background_color = clearBackgroundColor;
|
||||
this._pattern = null;
|
||||
};
|
||||
},
|
||||
addCustomNodeDefs(node_defs) {
|
||||
const sortObjectKeys = /* @__PURE__ */ __name((unordered) => {
|
||||
return Object.keys(unordered).sort().reduce((obj, key) => {
|
||||
obj[key] = unordered[key];
|
||||
return obj;
|
||||
}, {});
|
||||
}, "sortObjectKeys");
|
||||
function getSlotTypes() {
|
||||
var types = [];
|
||||
const defs = node_defs;
|
||||
for (const nodeId in defs) {
|
||||
const nodeData = defs[nodeId];
|
||||
var inputs = nodeData["input"]["required"];
|
||||
if (nodeData["input"]["optional"] !== void 0) {
|
||||
inputs = Object.assign(
|
||||
{},
|
||||
nodeData["input"]["required"],
|
||||
nodeData["input"]["optional"]
|
||||
);
|
||||
}
|
||||
for (const inputName in inputs) {
|
||||
const inputData = inputs[inputName];
|
||||
const type = inputData[0];
|
||||
if (!Array.isArray(type)) {
|
||||
types.push(type);
|
||||
}
|
||||
}
|
||||
for (const o in nodeData["output"]) {
|
||||
const output = nodeData["output"][o];
|
||||
types.push(output);
|
||||
}
|
||||
}
|
||||
return types;
|
||||
}
|
||||
__name(getSlotTypes, "getSlotTypes");
|
||||
function completeColorPalette(colorPalette) {
|
||||
var types = getSlotTypes();
|
||||
for (const type of types) {
|
||||
if (!colorPalette.colors.node_slot[type]) {
|
||||
colorPalette.colors.node_slot[type] = "";
|
||||
}
|
||||
}
|
||||
colorPalette.colors.node_slot = sortObjectKeys(
|
||||
colorPalette.colors.node_slot
|
||||
);
|
||||
return colorPalette;
|
||||
}
|
||||
__name(completeColorPalette, "completeColorPalette");
|
||||
const getColorPaletteTemplate = /* @__PURE__ */ __name(async () => {
|
||||
let colorPalette = {
|
||||
id: "my_color_palette_unique_id",
|
||||
name: "My Color Palette",
|
||||
colors: {
|
||||
node_slot: {},
|
||||
litegraph_base: {},
|
||||
comfy_base: {}
|
||||
}
|
||||
};
|
||||
const defaultColorPalette2 = colorPalettes[defaultColorPaletteId];
|
||||
for (const key in defaultColorPalette2.colors.litegraph_base) {
|
||||
if (!colorPalette.colors.litegraph_base[key]) {
|
||||
colorPalette.colors.litegraph_base[key] = "";
|
||||
}
|
||||
}
|
||||
for (const key in defaultColorPalette2.colors.comfy_base) {
|
||||
if (!colorPalette.colors.comfy_base[key]) {
|
||||
colorPalette.colors.comfy_base[key] = "";
|
||||
}
|
||||
}
|
||||
return completeColorPalette(colorPalette);
|
||||
}, "getColorPaletteTemplate");
|
||||
const addCustomColorPalette = /* @__PURE__ */ __name(async (colorPalette) => {
|
||||
if (typeof colorPalette !== "object") {
|
||||
useToastStore().addAlert("Invalid color palette.");
|
||||
return;
|
||||
}
|
||||
if (!colorPalette.id) {
|
||||
useToastStore().addAlert("Color palette missing id.");
|
||||
return;
|
||||
}
|
||||
if (!colorPalette.name) {
|
||||
useToastStore().addAlert("Color palette missing name.");
|
||||
return;
|
||||
}
|
||||
if (!colorPalette.colors) {
|
||||
useToastStore().addAlert("Color palette missing colors.");
|
||||
return;
|
||||
}
|
||||
if (colorPalette.colors.node_slot && typeof colorPalette.colors.node_slot !== "object") {
|
||||
useToastStore().addAlert("Invalid color palette colors.node_slot.");
|
||||
return;
|
||||
}
|
||||
const customColorPalettes = getCustomColorPalettes();
|
||||
customColorPalettes[colorPalette.id] = colorPalette;
|
||||
setCustomColorPalettes(customColorPalettes);
|
||||
for (const option of els.select.childNodes) {
|
||||
if (option.value === "custom_" + colorPalette.id) {
|
||||
els.select.removeChild(option);
|
||||
}
|
||||
}
|
||||
els.select.append(
|
||||
$el("option", {
|
||||
textContent: colorPalette.name + " (custom)",
|
||||
value: "custom_" + colorPalette.id,
|
||||
selected: true
|
||||
})
|
||||
);
|
||||
setColorPalette("custom_" + colorPalette.id);
|
||||
await loadColorPalette(colorPalette);
|
||||
}, "addCustomColorPalette");
|
||||
const deleteCustomColorPalette = /* @__PURE__ */ __name(async (colorPaletteId) => {
|
||||
const customColorPalettes = getCustomColorPalettes();
|
||||
delete customColorPalettes[colorPaletteId];
|
||||
setCustomColorPalettes(customColorPalettes);
|
||||
for (const opt of els.select.childNodes) {
|
||||
const option = opt;
|
||||
if (option.value === defaultColorPaletteId) {
|
||||
option.selected = true;
|
||||
}
|
||||
if (option.value === "custom_" + colorPaletteId) {
|
||||
els.select.removeChild(option);
|
||||
}
|
||||
}
|
||||
setColorPalette(defaultColorPaletteId);
|
||||
await loadColorPalette(getColorPalette());
|
||||
}, "deleteCustomColorPalette");
|
||||
const loadColorPalette = /* @__PURE__ */ __name(async (colorPalette) => {
|
||||
colorPalette = await completeColorPalette(colorPalette);
|
||||
if (colorPalette.colors) {
|
||||
if (colorPalette.colors.node_slot) {
|
||||
Object.assign(
|
||||
app.canvas.default_connection_color_byType,
|
||||
colorPalette.colors.node_slot
|
||||
);
|
||||
Object.assign(
|
||||
LGraphCanvas.link_type_colors,
|
||||
colorPalette.colors.node_slot
|
||||
);
|
||||
}
|
||||
if (colorPalette.colors.litegraph_base) {
|
||||
app.canvas.node_title_color = colorPalette.colors.litegraph_base.NODE_TITLE_COLOR;
|
||||
app.canvas.default_link_color = colorPalette.colors.litegraph_base.LINK_COLOR;
|
||||
for (const key in colorPalette.colors.litegraph_base) {
|
||||
if (colorPalette.colors.litegraph_base.hasOwnProperty(key) && LiteGraph.hasOwnProperty(key)) {
|
||||
LiteGraph[key] = colorPalette.colors.litegraph_base[key];
|
||||
}
|
||||
}
|
||||
}
|
||||
if (colorPalette.colors.comfy_base) {
|
||||
const rootStyle = document.documentElement.style;
|
||||
for (const key in colorPalette.colors.comfy_base) {
|
||||
rootStyle.setProperty(
|
||||
"--" + key,
|
||||
colorPalette.colors.comfy_base[key]
|
||||
);
|
||||
}
|
||||
}
|
||||
if (colorPalette.colors.litegraph_base.NODE_BYPASS_BGCOLOR) {
|
||||
app.bypassBgColor = colorPalette.colors.litegraph_base.NODE_BYPASS_BGCOLOR;
|
||||
}
|
||||
app.canvas.draw(true, true);
|
||||
}
|
||||
}, "loadColorPalette");
|
||||
const fileInput = $el("input", {
|
||||
type: "file",
|
||||
accept: ".json",
|
||||
style: { display: "none" },
|
||||
parent: document.body,
|
||||
onchange: /* @__PURE__ */ __name(() => {
|
||||
const file = fileInput.files[0];
|
||||
if (file.type === "application/json" || file.name.endsWith(".json")) {
|
||||
const reader = new FileReader();
|
||||
reader.onload = async () => {
|
||||
await addCustomColorPalette(JSON.parse(reader.result));
|
||||
};
|
||||
reader.readAsText(file);
|
||||
}
|
||||
}, "onchange")
|
||||
});
|
||||
app.ui.settings.addSetting({
|
||||
id,
|
||||
category: ["Comfy", "ColorPalette"],
|
||||
name: "Color Palette",
|
||||
type: /* @__PURE__ */ __name((name, setter, value) => {
|
||||
const options = [
|
||||
...Object.values(colorPalettes).map(
|
||||
(c) => $el("option", {
|
||||
textContent: c.name,
|
||||
value: c.id,
|
||||
selected: c.id === value
|
||||
})
|
||||
),
|
||||
...Object.values(getCustomColorPalettes()).map(
|
||||
(c) => $el("option", {
|
||||
textContent: `${c.name} (custom)`,
|
||||
value: `custom_${c.id}`,
|
||||
selected: `custom_${c.id}` === value
|
||||
})
|
||||
)
|
||||
];
|
||||
els.select = $el(
|
||||
"select",
|
||||
{
|
||||
style: {
|
||||
marginBottom: "0.15rem",
|
||||
width: "100%"
|
||||
},
|
||||
onchange: /* @__PURE__ */ __name((e) => {
|
||||
setter(e.target.value);
|
||||
}, "onchange")
|
||||
},
|
||||
options
|
||||
);
|
||||
return $el("tr", [
|
||||
$el("td", [
|
||||
els.select,
|
||||
$el(
|
||||
"div",
|
||||
{
|
||||
style: {
|
||||
display: "grid",
|
||||
gap: "4px",
|
||||
gridAutoFlow: "column"
|
||||
}
|
||||
},
|
||||
[
|
||||
$el("input", {
|
||||
type: "button",
|
||||
value: "Export",
|
||||
onclick: /* @__PURE__ */ __name(async () => {
|
||||
const colorPaletteId = app.ui.settings.getSettingValue(
|
||||
id,
|
||||
defaultColorPaletteId
|
||||
);
|
||||
const colorPalette = await completeColorPalette(
|
||||
getColorPalette(colorPaletteId)
|
||||
);
|
||||
const json = JSON.stringify(colorPalette, null, 2);
|
||||
const blob = new Blob([json], { type: "application/json" });
|
||||
const url = URL.createObjectURL(blob);
|
||||
const a = $el("a", {
|
||||
href: url,
|
||||
download: colorPaletteId + ".json",
|
||||
style: { display: "none" },
|
||||
parent: document.body
|
||||
});
|
||||
a.click();
|
||||
setTimeout(function() {
|
||||
a.remove();
|
||||
window.URL.revokeObjectURL(url);
|
||||
}, 0);
|
||||
}, "onclick")
|
||||
}),
|
||||
$el("input", {
|
||||
type: "button",
|
||||
value: "Import",
|
||||
onclick: /* @__PURE__ */ __name(() => {
|
||||
fileInput.click();
|
||||
}, "onclick")
|
||||
}),
|
||||
$el("input", {
|
||||
type: "button",
|
||||
value: "Template",
|
||||
onclick: /* @__PURE__ */ __name(async () => {
|
||||
const colorPalette = await getColorPaletteTemplate();
|
||||
const json = JSON.stringify(colorPalette, null, 2);
|
||||
const blob = new Blob([json], { type: "application/json" });
|
||||
const url = URL.createObjectURL(blob);
|
||||
const a = $el("a", {
|
||||
href: url,
|
||||
download: "color_palette.json",
|
||||
style: { display: "none" },
|
||||
parent: document.body
|
||||
});
|
||||
a.click();
|
||||
setTimeout(function() {
|
||||
a.remove();
|
||||
window.URL.revokeObjectURL(url);
|
||||
}, 0);
|
||||
}, "onclick")
|
||||
}),
|
||||
$el("input", {
|
||||
type: "button",
|
||||
value: "Delete",
|
||||
onclick: /* @__PURE__ */ __name(async () => {
|
||||
let colorPaletteId = app.ui.settings.getSettingValue(
|
||||
id,
|
||||
defaultColorPaletteId
|
||||
);
|
||||
if (colorPalettes[colorPaletteId]) {
|
||||
useToastStore().addAlert(
|
||||
"You cannot delete a built-in color palette."
|
||||
);
|
||||
return;
|
||||
}
|
||||
if (colorPaletteId.startsWith("custom_")) {
|
||||
colorPaletteId = colorPaletteId.substr(7);
|
||||
}
|
||||
await deleteCustomColorPalette(colorPaletteId);
|
||||
}, "onclick")
|
||||
})
|
||||
]
|
||||
)
|
||||
])
|
||||
]);
|
||||
}, "type"),
|
||||
defaultValue: defaultColorPaletteId,
|
||||
async onChange(value) {
|
||||
if (!value) {
|
||||
return;
|
||||
}
|
||||
let palette = colorPalettes[value];
|
||||
if (palette) {
|
||||
await loadColorPalette(palette);
|
||||
} else if (value.startsWith("custom_")) {
|
||||
value = value.substr(7);
|
||||
let customColorPalettes = getCustomColorPalettes();
|
||||
if (customColorPalettes[value]) {
|
||||
palette = customColorPalettes[value];
|
||||
await loadColorPalette(customColorPalettes[value]);
|
||||
}
|
||||
}
|
||||
let { BACKGROUND_IMAGE, CLEAR_BACKGROUND_COLOR } = palette.colors.litegraph_base;
|
||||
if (BACKGROUND_IMAGE === void 0 || CLEAR_BACKGROUND_COLOR === void 0) {
|
||||
const base = colorPalettes["dark"].colors.litegraph_base;
|
||||
BACKGROUND_IMAGE = base.BACKGROUND_IMAGE;
|
||||
CLEAR_BACKGROUND_COLOR = base.CLEAR_BACKGROUND_COLOR;
|
||||
}
|
||||
app.canvas.updateBackground(BACKGROUND_IMAGE, CLEAR_BACKGROUND_COLOR);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
window.comfyAPI = window.comfyAPI || {};
|
||||
window.comfyAPI.colorPalette = window.comfyAPI.colorPalette || {};
|
||||
window.comfyAPI.colorPalette.defaultColorPalette = defaultColorPalette;
|
||||
window.comfyAPI.colorPalette.getColorPalette = getColorPalette;
|
||||
export {
|
||||
defaultColorPalette as d,
|
||||
getColorPalette as g
|
||||
};
|
||||
//# sourceMappingURL=colorPalette-D5oi2-2V.js.map
|
1
web/assets/colorPalette-D5oi2-2V.js.map
generated
vendored
File diff suppressed because one or more lines are too long
564
web/assets/index-BHJGjcJh.css → web/assets/index-BDQCPKeJ.css
generated
vendored
@ -235,73 +235,33 @@
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
|
||||
[data-v-e5724e4d] .p-datatable-tbody > tr > td {
|
||||
padding: 1px;
|
||||
min-height: 2rem;
|
||||
}
|
||||
[data-v-e5724e4d] .p-datatable-row-selected .actions,[data-v-e5724e4d] .p-datatable-selectable-row:hover .actions {
|
||||
visibility: visible;
|
||||
}
|
||||
|
||||
.settings-tab-panels {
|
||||
padding-top: 0px !important;
|
||||
}
|
||||
|
||||
.settings-container[data-v-fc1edb48] {
|
||||
.settings-container[data-v-63951e2f] {
|
||||
display: flex;
|
||||
height: 70vh;
|
||||
width: 60vw;
|
||||
max-width: 1000px;
|
||||
max-width: 1024px;
|
||||
overflow: hidden;
|
||||
/* Prevents container from scrolling */
|
||||
}
|
||||
.settings-sidebar[data-v-fc1edb48] {
|
||||
width: 250px;
|
||||
flex-shrink: 0;
|
||||
/* Prevents sidebar from shrinking */
|
||||
overflow-y: auto;
|
||||
padding: 10px;
|
||||
}
|
||||
.settings-search-box[data-v-fc1edb48] {
|
||||
width: 100%;
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
.settings-content[data-v-fc1edb48] {
|
||||
flex-grow: 1;
|
||||
overflow-y: auto;
|
||||
/* Allows vertical scrolling */
|
||||
}
|
||||
|
||||
/* Ensure the Listbox takes full width of the sidebar */
|
||||
.settings-sidebar[data-v-fc1edb48] .p-listbox {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
/* Optional: Style scrollbars for webkit browsers */
|
||||
.settings-sidebar[data-v-fc1edb48]::-webkit-scrollbar,
|
||||
.settings-content[data-v-fc1edb48]::-webkit-scrollbar {
|
||||
width: 1px;
|
||||
}
|
||||
.settings-sidebar[data-v-fc1edb48]::-webkit-scrollbar-thumb,
|
||||
.settings-content[data-v-fc1edb48]::-webkit-scrollbar-thumb {
|
||||
background-color: transparent;
|
||||
}
|
||||
@media (max-width: 768px) {
|
||||
.settings-container[data-v-fc1edb48] {
|
||||
.settings-container[data-v-63951e2f] {
|
||||
flex-direction: column;
|
||||
height: auto;
|
||||
}
|
||||
.settings-sidebar[data-v-fc1edb48] {
|
||||
.settings-sidebar[data-v-63951e2f] {
|
||||
width: 100%;
|
||||
}
|
||||
}
|
||||
|
||||
/* Show a separator line above the Keybinding tab */
|
||||
/* This indicates the start of custom setting panels */
|
||||
.settings-sidebar[data-v-fc1edb48] .p-listbox-option[aria-label='Keybinding'] {
|
||||
.settings-sidebar[data-v-63951e2f] .p-listbox-option[aria-label='Keybinding'] {
|
||||
position: relative;
|
||||
}
|
||||
.settings-sidebar[data-v-fc1edb48] .p-listbox-option[aria-label='Keybinding']::before {
|
||||
.settings-sidebar[data-v-63951e2f] .p-listbox-option[aria-label='Keybinding']::before {
|
||||
position: absolute;
|
||||
top: 0px;
|
||||
left: 0px;
|
||||
@ -640,6 +600,469 @@
|
||||
bottom: 41px;
|
||||
}
|
||||
|
||||
|
||||
.editable-text[data-v-54da6fc9] {
|
||||
display: inline;
|
||||
}
|
||||
.editable-text input[data-v-54da6fc9] {
|
||||
width: 100%;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.tree-node[data-v-fb2b90cf] {
|
||||
width: 100%;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
}
|
||||
.leaf-count-badge[data-v-fb2b90cf] {
|
||||
margin-left: 0.5rem;
|
||||
}
|
||||
.node-content[data-v-fb2b90cf] {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
flex-grow: 1;
|
||||
}
|
||||
.leaf-label[data-v-fb2b90cf] {
|
||||
margin-left: 0.5rem;
|
||||
}
|
||||
[data-v-fb2b90cf] .editable-text span {
|
||||
word-break: break-all;
|
||||
}
|
||||
|
||||
[data-v-bd7bae90] .tree-explorer-node-label {
|
||||
width: 100%;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
margin-left: var(--p-tree-node-gap);
|
||||
flex-grow: 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* The following styles are necessary to avoid layout shift when dragging nodes over folders.
|
||||
* By setting the position to relative on the parent and using an absolutely positioned pseudo-element,
|
||||
* we can create a visual indicator for the drop target without affecting the layout of other elements.
*/
[data-v-bd7bae90] .p-tree-node-content:has(.tree-folder) {
position: relative;
}
[data-v-bd7bae90] .p-tree-node-content:has(.tree-folder.can-drop)::after {
content: '';
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
border: 1px solid var(--p-content-color);
pointer-events: none;
}

.model_preview[data-v-32e6c4d9] {
background-color: var(--comfy-menu-bg);
font-family: 'Open Sans', sans-serif;
color: var(--descrip-text);
border: 1px solid var(--descrip-text);
min-width: 300px;
max-width: 500px;
width: -moz-fit-content;
width: fit-content;
height: -moz-fit-content;
height: fit-content;
z-index: 9999;
border-radius: 12px;
overflow: hidden;
font-size: 12px;
padding: 10px;
}
.model_preview_image[data-v-32e6c4d9] {
margin: auto;
width: -moz-fit-content;
width: fit-content;
}
.model_preview_image img[data-v-32e6c4d9] {
max-width: 100%;
max-height: 150px;
-o-object-fit: contain;
object-fit: contain;
}
.model_preview_title[data-v-32e6c4d9] {
font-weight: bold;
text-align: center;
font-size: 14px;
}
.model_preview_top_container[data-v-32e6c4d9] {
text-align: center;
line-height: 0.5;
}
.model_preview_filename[data-v-32e6c4d9],
.model_preview_author[data-v-32e6c4d9],
.model_preview_architecture[data-v-32e6c4d9] {
display: inline-block;
text-align: center;
margin: 5px;
font-size: 10px;
}
.model_preview_prefix[data-v-32e6c4d9] {
font-weight: bold;
}

.model-lib-model-icon-container[data-v-70b69131] {
display: inline-block;
position: relative;
left: 0;
height: 1.5rem;
vertical-align: top;
width: 0px;
}
.model-lib-model-icon[data-v-70b69131] {
background-size: cover;
background-position: center;
display: inline-block;
position: relative;
left: -2.5rem;
height: 2rem;
width: 2rem;
vertical-align: top;
}

[data-v-32285943] .pi-fake-spacer {
height: 1px;
width: 16px;
}

.slot_row[data-v-ff07c900] {
padding: 2px;
}

/* Original N-Sidebar styles */
._sb_dot[data-v-ff07c900] {
width: 8px;
height: 8px;
border-radius: 50%;
background-color: grey;
}
.node_header[data-v-ff07c900] {
line-height: 1;
padding: 8px 13px 7px;
margin-bottom: 5px;
font-size: 15px;
text-wrap: nowrap;
overflow: hidden;
display: flex;
align-items: center;
}
.headdot[data-v-ff07c900] {
width: 10px;
height: 10px;
float: inline-start;
margin-right: 8px;
}
.IMAGE[data-v-ff07c900] {
background-color: #64b5f6;
}
.VAE[data-v-ff07c900] {
background-color: #ff6e6e;
}
.LATENT[data-v-ff07c900] {
background-color: #ff9cf9;
}
.MASK[data-v-ff07c900] {
background-color: #81c784;
}
.CONDITIONING[data-v-ff07c900] {
background-color: #ffa931;
}
.CLIP[data-v-ff07c900] {
background-color: #ffd500;
}
.MODEL[data-v-ff07c900] {
background-color: #b39ddb;
}
.CONTROL_NET[data-v-ff07c900] {
background-color: #a5d6a7;
}
._sb_node_preview[data-v-ff07c900] {
background-color: var(--comfy-menu-bg);
font-family: 'Open Sans', sans-serif;
font-size: small;
color: var(--descrip-text);
border: 1px solid var(--descrip-text);
min-width: 300px;
width: -moz-min-content;
width: min-content;
height: -moz-fit-content;
height: fit-content;
z-index: 9999;
border-radius: 12px;
overflow: hidden;
font-size: 12px;
padding-bottom: 10px;
}
._sb_node_preview ._sb_description[data-v-ff07c900] {
margin: 10px;
padding: 6px;
background: var(--border-color);
border-radius: 5px;
font-style: italic;
font-weight: 500;
font-size: 0.9rem;
word-break: break-word;
}
._sb_table[data-v-ff07c900] {
display: grid;

grid-column-gap: 10px;
/* Spazio tra le colonne */
width: 100%;
/* Imposta la larghezza della tabella al 100% del contenitore */
}
._sb_row[data-v-ff07c900] {
display: grid;
grid-template-columns: 10px 1fr 1fr 1fr 10px;
grid-column-gap: 10px;
align-items: center;
padding-left: 9px;
padding-right: 9px;
}
._sb_row_string[data-v-ff07c900] {
grid-template-columns: 10px 1fr 1fr 10fr 1fr;
}
._sb_col[data-v-ff07c900] {
border: 0px solid #000;
display: flex;
align-items: flex-end;
flex-direction: row-reverse;
flex-wrap: nowrap;
align-content: flex-start;
justify-content: flex-end;
}
._sb_inherit[data-v-ff07c900] {
display: inherit;
}
._long_field[data-v-ff07c900] {
background: var(--bg-color);
border: 2px solid var(--border-color);
margin: 5px 5px 0 5px;
border-radius: 10px;
line-height: 1.7;
text-wrap: nowrap;
}
._sb_arrow[data-v-ff07c900] {
color: var(--fg-color);
}
._sb_preview_badge[data-v-ff07c900] {
text-align: center;
background: var(--comfy-input-bg);
font-weight: bold;
color: var(--error-text);
}

.node-lib-node-container[data-v-90dfee08] {
height: 100%;
width: 100%
}

.p-selectbutton .p-button[data-v-91077f2a] {
padding: 0.5rem;
}
.p-selectbutton .p-button .pi[data-v-91077f2a] {
font-size: 1.5rem;
}
.field[data-v-91077f2a] {
display: flex;
flex-direction: column;
gap: 0.5rem;
}
.color-picker-container[data-v-91077f2a] {
display: flex;
align-items: center;
gap: 0.5rem;
}

._content[data-v-e7b35fd9] {

display: flex;

flex-direction: column
}
._content[data-v-e7b35fd9] > :not([hidden]) ~ :not([hidden]) {

--tw-space-y-reverse: 0;

margin-top: calc(0.5rem * calc(1 - var(--tw-space-y-reverse)));

margin-bottom: calc(0.5rem * var(--tw-space-y-reverse))
}
._footer[data-v-e7b35fd9] {

display: flex;

flex-direction: column;

align-items: flex-end;

padding-top: 1rem
}

.comfy-image-wrap[data-v-9bc23daf] {
display: contents;
}
.comfy-image-blur[data-v-9bc23daf] {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
-o-object-fit: cover;
object-fit: cover;
}
.comfy-image-main[data-v-9bc23daf] {
width: 100%;
height: 100%;
-o-object-fit: cover;
object-fit: cover;
-o-object-position: center;
object-position: center;
z-index: 1;
}
.contain .comfy-image-wrap[data-v-9bc23daf] {
position: relative;
width: 100%;
height: 100%;
}
.contain .comfy-image-main[data-v-9bc23daf] {
-o-object-fit: contain;
object-fit: contain;
-webkit-backdrop-filter: blur(10px);
backdrop-filter: blur(10px);
position: absolute;
}
.broken-image-placeholder[data-v-9bc23daf] {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
width: 100%;
height: 100%;
margin: 2rem;
}
.broken-image-placeholder i[data-v-9bc23daf] {
font-size: 3rem;
margin-bottom: 0.5rem;
}

.result-container[data-v-62b7731e] {
width: 100%;
height: 100%;
aspect-ratio: 1 / 1;
overflow: hidden;
position: relative;
display: flex;
justify-content: center;
align-items: center;
}
.preview-mask[data-v-62b7731e] {
position: absolute;
left: 50%;
top: 50%;
transform: translate(-50%, -50%);
display: flex;
align-items: center;
justify-content: center;
opacity: 0;
transition: opacity 0.3s ease;
z-index: 1;
}
.result-container:hover .preview-mask[data-v-62b7731e] {
opacity: 1;
}

.task-result-preview[data-v-28bce53e] {
aspect-ratio: 1 / 1;
overflow: hidden;
display: flex;
justify-content: center;
align-items: center;
width: 100%;
height: 100%;
}
.task-result-preview i[data-v-28bce53e],
.task-result-preview span[data-v-28bce53e] {
font-size: 2rem;
}
.task-item[data-v-28bce53e] {
display: flex;
flex-direction: column;
border-radius: 4px;
overflow: hidden;
position: relative;
}
.task-item-details[data-v-28bce53e] {
position: absolute;
bottom: 0;
padding: 0.6rem;
display: flex;
justify-content: space-between;
align-items: center;
width: 100%;
z-index: 1;
}
.task-node-link[data-v-28bce53e] {
padding: 2px;
}

/* In dark mode, transparent background color for tags is not ideal for tags that
are floating on top of images. */
.tag-wrapper[data-v-28bce53e] {
background-color: var(--p-primary-contrast-color);
border-radius: 6px;
display: inline-flex;
}
.node-name-tag[data-v-28bce53e] {
word-break: break-all;
}
.status-tag-group[data-v-28bce53e] {
display: flex;
flex-direction: column;
}
.progress-preview-img[data-v-28bce53e] {
width: 100%;
height: 100%;
-o-object-fit: cover;
object-fit: cover;
-o-object-position: center;
object-position: center;
}

/* PrimeVue's galleria teleports the fullscreen gallery out of subtree so we
cannot use scoped style here. */
img.galleria-image {
max-width: 100vw;
max-height: 100vh;
-o-object-fit: contain;
object-fit: contain;
}
.p-galleria-close-button {
/* Set z-index so the close button doesn't get hidden behind the image when image is large */
z-index: 1;
}

.scroll-container[data-v-0bfbd127] {
height: 100%;
overflow-y: auto;
}
.scroll-container[data-v-0bfbd127]::-webkit-scrollbar {
width: 1px;
}
.scroll-container[data-v-0bfbd127]::-webkit-scrollbar-thumb {
background-color: transparent;
}
.queue-grid[data-v-0bfbd127] {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(200px, 1fr));
padding: 0.5rem;
gap: 0.5rem;
}
/* this CSS contains only the basic CSS needed to run the app and use it */

.lgraphcanvas {
@@ -1439,6 +1862,9 @@ cursor: pointer;
.z-\[1000\]{
z-index: 1000;
}
.m-2{
margin: 0.5rem;
}
.mx-1{
margin-left: 0.25rem;
margin-right: 0.25rem;
@@ -1447,14 +1873,6 @@ cursor: pointer;
margin-left: 0.5rem;
margin-right: 0.5rem;
}
.mx-4{
margin-left: 1rem;
margin-right: 1rem;
}
.my-4{
margin-top: 1rem;
margin-bottom: 1rem;
}
.mb-2{
margin-bottom: 0.5rem;
}
@@ -1467,6 +1885,9 @@ cursor: pointer;
.ml-2{
margin-left: 0.5rem;
}
.ml-\[-13px\]{
margin-left: -13px;
}
.ml-auto{
margin-left: auto;
}
@@ -1476,9 +1897,6 @@ cursor: pointer;
.mr-2{
margin-right: 0.5rem;
}
.mt-1{
margin-top: 0.25rem;
}
.mt-4{
margin-top: 1rem;
}
@@ -1509,6 +1927,9 @@ cursor: pointer;
.hidden{
display: none;
}
.h-0{
height: 0px;
}
.h-64{
height: 16rem;
}
@@ -1518,6 +1939,9 @@ cursor: pointer;
.h-screen{
height: 100vh;
}
.min-h-10{
min-height: 2.5rem;
}
.min-h-screen{
min-height: 100vh;
}
@@ -1549,9 +1973,6 @@ cursor: pointer;
.max-w-\[150px\]{
max-width: 150px;
}
.flex-shrink{
flex-shrink: 1;
}
.flex-shrink-0{
flex-shrink: 0;
}
@@ -1634,6 +2055,9 @@ cursor: pointer;
.overflow-y-auto{
overflow-y: auto;
}
.overflow-x-hidden{
overflow-x: hidden;
}
.truncate{
overflow: hidden;
text-overflow: ellipsis;
@@ -1663,9 +2087,19 @@ cursor: pointer;
.border{
border-width: 1px;
}
.border-x-0{
border-left-width: 0px;
border-right-width: 0px;
}
.border-t-0{
border-top-width: 0px;
}
.border-none{
border-style: none;
}
.bg-\[var\(--p-tree-background\)\]{
background-color: var(--p-tree-background);
}
.bg-black{
--tw-bg-opacity: 1;
background-color: rgb(0 0 0 / var(--tw-bg-opacity));
@@ -1698,6 +2132,12 @@ cursor: pointer;
.p-1{
padding: 0.25rem;
}
.p-2{
padding: 0.5rem;
}
.p-4{
padding: 1rem;
}
.px-0{
padding-left: 0px;
padding-right: 0px;
@@ -1710,6 +2150,10 @@ cursor: pointer;
padding-top: 0px;
padding-bottom: 0px;
}
.py-1{
padding-top: 0.25rem;
padding-bottom: 0.25rem;
}
.pb-0{
padding-bottom: 0px;
}
1
web/assets/index-BMC1ey-i.js.map
generated
vendored
File diff suppressed because one or more lines are too long
102359
web/assets/index-DGAbdBYF.js → web/assets/index-CgU1oKZt.js
generated
vendored
File diff suppressed because one or more lines are too long
1
web/assets/index-CgU1oKZt.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
113
web/assets/index-BMC1ey-i.js → web/assets/index-D36_Nnai.js
generated
vendored
@@ -1,8 +1,7 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { c9 as ComfyDialog, ca as $el, cb as ComfyApp, k as app, z as LiteGraph, aP as LGraphCanvas, cc as DraggableList, bO as useToastStore, aq as useNodeDefStore, b4 as api, L as LGraphGroup, cd as KeyComboImpl, aT as useKeybindingStore, aL as useCommandStore, l as LGraphNode, ce as ComfyWidgets, cf as applyTextReplacements, aA as NodeSourceType, cg as NodeBadgeMode, h as useSettingStore, F as computed, w as watch, ch as BadgePosition, aR as LGraphBadge, au as _ } from "./index-DGAbdBYF.js";
import { g as getColorPalette, d as defaultColorPalette } from "./colorPalette-D5oi2-2V.js";
import { mergeIfValid, getWidgetConfig, setWidgetConfig } from "./widgetInputs-DdoWwzg5.js";
import { bu as ComfyDialog, bv as $el, bw as ComfyApp, c as app, k as LiteGraph, aP as LGraphCanvas, bx as DraggableList, a_ as useToastStore, ax as useNodeDefStore, bq as api, L as LGraphGroup, by as KeyComboImpl, K as useKeybindingStore, F as useCommandStore, e as LGraphNode, bz as ComfyWidgets, bA as applyTextReplacements, av as NodeSourceType, bB as NodeBadgeMode, u as useSettingStore, q as computed, bC as getColorPalette, w as watch, bD as BadgePosition, aR as LGraphBadge, bE as _, bF as defaultColorPalette } from "./index-CgU1oKZt.js";
import { mergeIfValid, getWidgetConfig, setWidgetConfig } from "./widgetInputs-DNVvusS1.js";
class ClipspaceDialog extends ComfyDialog {
static {
__name(this, "ClipspaceDialog");
@@ -339,7 +338,7 @@ app.registerExtension({
if (text[start] === "(") openCount++;
if (text[start] === ")") closeCount++;
}
if (start < 0) return false;
if (start < 0) return null;
openCount = 0;
closeCount = 0;
while (end < text.length) {
@@ -348,7 +347,7 @@ app.registerExtension({
if (text[end] === ")") closeCount++;
end++;
}
if (end === text.length) return false;
if (end === text.length) return null;
return { start: start + 1, end };
}
__name(findNearestEnclosure, "findNearestEnclosure");
@@ -1637,9 +1636,7 @@ class GroupNodeHandler {
},
{
content: "Manage Group Node",
callback: /* @__PURE__ */ __name(() => {
new ManageGroupDialog(app).show(this.type);
}, "callback")
callback: manageGroupNodes
}
);
};
@@ -1960,9 +1957,7 @@ function addConvertToGroupOptions() {
options.splice(index + 1, null, {
content: `Convert to Group Node`,
disabled,
callback: /* @__PURE__ */ __name(async () => {
return await GroupNodeHandler.fromNodes(selected);
}, "callback")
callback: convertSelectedNodesToGroupNode
});
}
__name(addConvertOption, "addConvertOption");
@@ -1972,9 +1967,7 @@ function addConvertToGroupOptions() {
options.splice(index + 1, null, {
content: `Manage Group Nodes`,
disabled,
callback: /* @__PURE__ */ __name(() => {
new ManageGroupDialog(app).show();
}, "callback")
callback: manageGroupNodes
});
}
__name(addManageOption, "addManageOption");
@@ -2004,10 +1997,77 @@ const replaceLegacySeparators = /* @__PURE__ */ __name((nodes) => {
}
}
}, "replaceLegacySeparators");
async function convertSelectedNodesToGroupNode() {
const nodes = Object.values(app.canvas.selected_nodes ?? {});
if (nodes.length === 0) {
throw new Error("No nodes selected");
}
if (nodes.length === 1) {
throw new Error("Please select multiple nodes to convert to group node");
}
if (nodes.some((n) => GroupNodeHandler.isGroupNode(n))) {
throw new Error("Selected nodes contain a group node");
}
return await GroupNodeHandler.fromNodes(nodes);
}
__name(convertSelectedNodesToGroupNode, "convertSelectedNodesToGroupNode");
function ungroupSelectedGroupNodes() {
const nodes = Object.values(app.canvas.selected_nodes ?? {});
for (const node of nodes) {
if (GroupNodeHandler.isGroupNode(node)) {
node["convertToNodes"]?.();
}
}
}
__name(ungroupSelectedGroupNodes, "ungroupSelectedGroupNodes");
function manageGroupNodes() {
new ManageGroupDialog(app).show();
}
__name(manageGroupNodes, "manageGroupNodes");
const id$3 = "Comfy.GroupNode";
let globalDefs;
const ext$1 = {
name: id$3,
commands: [
{
id: "Comfy.GroupNode.ConvertSelectedNodesToGroupNode",
label: "Convert selected nodes to group node",
icon: "pi pi-sitemap",
versionAdded: "1.3.17",
function: convertSelectedNodesToGroupNode
},
{
id: "Comfy.GroupNode.UngroupSelectedGroupNodes",
label: "Ungroup selected group nodes",
icon: "pi pi-sitemap",
versionAdded: "1.3.17",
function: ungroupSelectedGroupNodes
},
{
id: "Comfy.GroupNode.ManageGroupNodes",
label: "Manage group nodes",
icon: "pi pi-cog",
versionAdded: "1.3.17",
function: manageGroupNodes
}
],
keybindings: [
{
commandId: "Comfy.GroupNode.ConvertSelectedNodesToGroupNode",
combo: {
alt: true,
key: "g"
}
},
{
commandId: "Comfy.GroupNode.UngroupSelectedGroupNodes",
combo: {
alt: true,
shift: true,
key: "G"
}
}
],
setup() {
addConvertToGroupOptions();
},
@@ -4172,10 +4232,19 @@ app.registerExtension({
LiteGraph.CANVAS_GRID_SIZE = +value || 10;
}
});
const alwaysSnapToGrid = app.ui.settings.addSetting({
id: "pysssss.SnapToGrid",
category: ["Comfy", "Graph", "AlwaysSnapToGrid"],
name: "Always snap to grid",
type: "boolean",
defaultValue: false,
versionAdded: "1.3.13"
});
const shouldSnapToGrid = /* @__PURE__ */ __name(() => app.shiftDown || alwaysSnapToGrid.value, "shouldSnapToGrid");
const onNodeMoved = app.canvas.onNodeMoved;
app.canvas.onNodeMoved = function(node) {
const r = onNodeMoved?.apply(this, arguments);
if (app.shiftDown) {
if (shouldSnapToGrid()) {
for (const id2 in this.selected_nodes) {
this.selected_nodes[id2].alignToGrid();
}
@@ -4186,7 +4255,7 @@ app.registerExtension({
app.graph.onNodeAdded = function(node) {
const onResize = node.onResize;
node.onResize = function() {
if (app.shiftDown) {
if (shouldSnapToGrid()) {
roundVectorToGrid(node.size);
}
return onResize?.apply(this, arguments);
@@ -4195,7 +4264,7 @@ app.registerExtension({
};
const origDrawNode = LGraphCanvas.prototype.drawNode;
LGraphCanvas.prototype.drawNode = function(node, ctx) {
if (app.shiftDown && this.node_dragged && node.id in this.selected_nodes) {
if (shouldSnapToGrid() && this.node_dragged && node.id in this.selected_nodes) {
const [x, y] = roundVectorToGrid([...node.pos]);
const shiftX = x - node.pos[0];
let shiftY = y - node.pos[1];
@@ -4227,7 +4296,7 @@ app.registerExtension({
if (!selectedAndMovingGroup && app.canvas.selected_group === this && (deltax || deltay)) {
selectedAndMovingGroup = this;
}
if (app.canvas.last_mouse_dragging === false && app.shiftDown) {
if (app.canvas.last_mouse_dragging === false && shouldSnapToGrid()) {
this.recomputeInsideNodes();
for (const node of this.nodes) {
node.alignToGrid();
@@ -4238,7 +4307,7 @@ app.registerExtension({
};
const drawGroups = LGraphCanvas.prototype.drawGroups;
LGraphCanvas.prototype.drawGroups = function(canvas, ctx) {
if (this.selected_group && app.shiftDown) {
if (this.selected_group && shouldSnapToGrid()) {
if (this.selected_group_resizing) {
roundVectorToGrid(this.selected_group.size);
} else if (selectedAndMovingGroup) {
@@ -4261,7 +4330,7 @@ app.registerExtension({
const onGroupAdd = LGraphCanvas.onGroupAdd;
LGraphCanvas.onGroupAdd = function() {
const v = onGroupAdd.apply(app.canvas, arguments);
if (app.shiftDown) {
if (shouldSnapToGrid()) {
const lastGroup = app.graph.groups[app.graph.groups.length - 1];
if (lastGroup) {
roundVectorToGrid(lastGroup.pos);
@@ -4274,7 +4343,7 @@ app.registerExtension({
});
app.registerExtension({
name: "Comfy.UploadImage",
async beforeRegisterNodeDef(nodeType, nodeData, app2) {
beforeRegisterNodeDef(nodeType, nodeData) {
if (nodeData?.input?.required?.image?.[1]?.image_upload === true) {
nodeData.input.required.upload = ["IMAGEUPLOAD"];
}
@@ -4662,4 +4731,4 @@ class NodeBadgeExtension {
}
}
app.registerExtension(new NodeBadgeExtension());
//# sourceMappingURL=index-BMC1ey-i.js.map
//# sourceMappingURL=index-D36_Nnai.js.map
1
web/assets/index-D36_Nnai.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
8997
web/assets/index-DBWDcZsl.js
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1
web/assets/index-DBWDcZsl.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1
web/assets/index-DGAbdBYF.js.map
generated
vendored
File diff suppressed because one or more lines are too long
102
web/assets/index-DYEEBf64.js
generated
vendored
Normal file
@@ -0,0 +1,102 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { bM as script$4, A as createBaseVNode, g as openBlock, h as createElementBlock, m as mergeProps } from "./index-CgU1oKZt.js";
var script$3 = {
name: "BarsIcon",
"extends": script$4
};
var _hoisted_1$3 = /* @__PURE__ */ createBaseVNode("path", {
"fill-rule": "evenodd",
"clip-rule": "evenodd",
d: "M13.3226 3.6129H0.677419C0.497757 3.6129 0.325452 3.54152 0.198411 3.41448C0.0713707 3.28744 0 3.11514 0 2.93548C0 2.75581 0.0713707 2.58351 0.198411 2.45647C0.325452 2.32943 0.497757 2.25806 0.677419 2.25806H13.3226C13.5022 2.25806 13.6745 2.32943 13.8016 2.45647C13.9286 2.58351 14 2.75581 14 2.93548C14 3.11514 13.9286 3.28744 13.8016 3.41448C13.6745 3.54152 13.5022 3.6129 13.3226 3.6129ZM13.3226 7.67741H0.677419C0.497757 7.67741 0.325452 7.60604 0.198411 7.479C0.0713707 7.35196 0 7.17965 0 6.99999C0 6.82033 0.0713707 6.64802 0.198411 6.52098C0.325452 6.39394 0.497757 6.32257 0.677419 6.32257H13.3226C13.5022 6.32257 13.6745 6.39394 13.8016 6.52098C13.9286 6.64802 14 6.82033 14 6.99999C14 7.17965 13.9286 7.35196 13.8016 7.479C13.6745 7.60604 13.5022 7.67741 13.3226 7.67741ZM0.677419 11.7419H13.3226C13.5022 11.7419 13.6745 11.6706 13.8016 11.5435C13.9286 11.4165 14 11.2442 14 11.0645C14 10.8848 13.9286 10.7125 13.8016 10.5855C13.6745 10.4585 13.5022 10.3871 13.3226 10.3871H0.677419C0.497757 10.3871 0.325452 10.4585 0.198411 10.5855C0.0713707 10.7125 0 10.8848 0 11.0645C0 11.2442 0.0713707 11.4165 0.198411 11.5435C0.325452 11.6706 0.497757 11.7419 0.677419 11.7419Z",
fill: "currentColor"
}, null, -1);
var _hoisted_2$3 = [_hoisted_1$3];
function render$3(_ctx, _cache, $props, $setup, $data, $options) {
return openBlock(), createElementBlock("svg", mergeProps({
width: "14",
height: "14",
viewBox: "0 0 14 14",
fill: "none",
xmlns: "http://www.w3.org/2000/svg"
}, _ctx.pti()), _hoisted_2$3, 16);
}
__name(render$3, "render$3");
script$3.render = render$3;
var script$2 = {
name: "PlusIcon",
"extends": script$4
};
var _hoisted_1$2 = /* @__PURE__ */ createBaseVNode("path", {
d: "M7.67742 6.32258V0.677419C7.67742 0.497757 7.60605 0.325452 7.47901 0.198411C7.35197 0.0713707 7.17966 0 7 0C6.82034 0 6.64803 0.0713707 6.52099 0.198411C6.39395 0.325452 6.32258 0.497757 6.32258 0.677419V6.32258H0.677419C0.497757 6.32258 0.325452 6.39395 0.198411 6.52099C0.0713707 6.64803 0 6.82034 0 7C0 7.17966 0.0713707 7.35197 0.198411 7.47901C0.325452 7.60605 0.497757 7.67742 0.677419 7.67742H6.32258V13.3226C6.32492 13.5015 6.39704 13.6725 6.52358 13.799C6.65012 13.9255 6.82106 13.9977 7 14C7.17966 14 7.35197 13.9286 7.47901 13.8016C7.60605 13.6745 7.67742 13.5022 7.67742 13.3226V7.67742H13.3226C13.5022 7.67742 13.6745 7.60605 13.8016 7.47901C13.9286 7.35197 14 7.17966 14 7C13.9977 6.82106 13.9255 6.65012 13.799 6.52358C13.6725 6.39704 13.5015 6.32492 13.3226 6.32258H7.67742Z",
fill: "currentColor"
}, null, -1);
var _hoisted_2$2 = [_hoisted_1$2];
function render$2(_ctx, _cache, $props, $setup, $data, $options) {
return openBlock(), createElementBlock("svg", mergeProps({
width: "14",
height: "14",
viewBox: "0 0 14 14",
fill: "none",
xmlns: "http://www.w3.org/2000/svg"
}, _ctx.pti()), _hoisted_2$2, 16);
}
__name(render$2, "render$2");
script$2.render = render$2;
var script$1 = {
name: "ExclamationTriangleIcon",
"extends": script$4
};
var _hoisted_1$1 = /* @__PURE__ */ createBaseVNode("path", {
d: "M13.4018 13.1893H0.598161C0.49329 13.189 0.390283 13.1615 0.299143 13.1097C0.208003 13.0578 0.131826 12.9832 0.0780112 12.8932C0.0268539 12.8015 0 12.6982 0 12.5931C0 12.4881 0.0268539 12.3848 0.0780112 12.293L6.47985 1.08982C6.53679 1.00399 6.61408 0.933574 6.70484 0.884867C6.7956 0.836159 6.897 0.810669 7 0.810669C7.103 0.810669 7.2044 0.836159 7.29516 0.884867C7.38592 0.933574 7.46321 1.00399 7.52015 1.08982L13.922 12.293C13.9731 12.3848 14 12.4881 14 12.5931C14 12.6982 13.9731 12.8015 13.922 12.8932C13.8682 12.9832 13.792 13.0578 13.7009 13.1097C13.6097 13.1615 13.5067 13.189 13.4018 13.1893ZM1.63046 11.989H12.3695L7 2.59425L1.63046 11.989Z",
fill: "currentColor"
}, null, -1);
var _hoisted_2$1 = /* @__PURE__ */ createBaseVNode("path", {
d: "M6.99996 8.78801C6.84143 8.78594 6.68997 8.72204 6.57787 8.60993C6.46576 8.49782 6.40186 8.34637 6.39979 8.18784V5.38703C6.39979 5.22786 6.46302 5.0752 6.57557 4.96265C6.68813 4.85009 6.84078 4.78686 6.99996 4.78686C7.15914 4.78686 7.31179 4.85009 7.42435 4.96265C7.5369 5.0752 7.60013 5.22786 7.60013 5.38703V8.18784C7.59806 8.34637 7.53416 8.49782 7.42205 8.60993C7.30995 8.72204 7.15849 8.78594 6.99996 8.78801Z",
fill: "currentColor"
}, null, -1);
var _hoisted_3 = /* @__PURE__ */ createBaseVNode("path", {
d: "M6.99996 11.1887C6.84143 11.1866 6.68997 11.1227 6.57787 11.0106C6.46576 10.8985 6.40186 10.7471 6.39979 10.5885V10.1884C6.39979 10.0292 6.46302 9.87658 6.57557 9.76403C6.68813 9.65147 6.84078 9.58824 6.99996 9.58824C7.15914 9.58824 7.31179 9.65147 7.42435 9.76403C7.5369 9.87658 7.60013 10.0292 7.60013 10.1884V10.5885C7.59806 10.7471 7.53416 10.8985 7.42205 11.0106C7.30995 11.1227 7.15849 11.1866 6.99996 11.1887Z",
fill: "currentColor"
}, null, -1);
var _hoisted_4 = [_hoisted_1$1, _hoisted_2$1, _hoisted_3];
function render$1(_ctx, _cache, $props, $setup, $data, $options) {
return openBlock(), createElementBlock("svg", mergeProps({
width: "14",
height: "14",
viewBox: "0 0 14 14",
fill: "none",
xmlns: "http://www.w3.org/2000/svg"
}, _ctx.pti()), _hoisted_4, 16);
}
__name(render$1, "render$1");
script$1.render = render$1;
var script = {
name: "InfoCircleIcon",
"extends": script$4
};
var _hoisted_1 = /* @__PURE__ */ createBaseVNode("path", {
"fill-rule": "evenodd",
"clip-rule": "evenodd",
d: "M3.11101 12.8203C4.26215 13.5895 5.61553 14 7 14C8.85652 14 10.637 13.2625 11.9497 11.9497C13.2625 10.637 14 8.85652 14 7C14 5.61553 13.5895 4.26215 12.8203 3.11101C12.0511 1.95987 10.9579 1.06266 9.67879 0.532846C8.3997 0.00303296 6.99224 -0.13559 5.63437 0.134506C4.2765 0.404603 3.02922 1.07129 2.05026 2.05026C1.07129 3.02922 0.404603 4.2765 0.134506 5.63437C-0.13559 6.99224 0.00303296 8.3997 0.532846 9.67879C1.06266 10.9579 1.95987 12.0511 3.11101 12.8203ZM3.75918 2.14976C4.71846 1.50879 5.84628 1.16667 7 1.16667C8.5471 1.16667 10.0308 1.78125 11.1248 2.87521C12.2188 3.96918 12.8333 5.45291 12.8333 7C12.8333 8.15373 12.4912 9.28154 11.8502 10.2408C11.2093 11.2001 10.2982 11.9478 9.23232 12.3893C8.16642 12.8308 6.99353 12.9463 5.86198 12.7212C4.73042 12.4962 3.69102 11.9406 2.87521 11.1248C2.05941 10.309 1.50384 9.26958 1.27876 8.13803C1.05367 7.00647 1.16919 5.83358 1.61071 4.76768C2.05222 3.70178 2.79989 2.79074 3.75918 2.14976ZM7.00002 4.8611C6.84594 4.85908 6.69873 4.79698 6.58977 4.68801C6.48081 4.57905 6.4187 4.43185 6.41669 4.27776V3.88888C6.41669 3.73417 6.47815 3.58579 6.58754 3.4764C6.69694 3.367 6.84531 3.30554 7.00002 3.30554C7.15473 3.30554 7.3031 3.367 7.4125 3.4764C7.52189 3.58579 7.58335 3.73417 7.58335 3.88888V4.27776C7.58134 4.43185 7.51923 4.57905 7.41027 4.68801C7.30131 4.79698 7.1541 4.85908 7.00002 4.8611ZM7.00002 10.6945C6.84594 10.6925 6.69873 10.6304 6.58977 10.5214C6.48081 10.4124 6.4187 10.2652 6.41669 10.1111V6.22225C6.41669 6.06754 6.47815 5.91917 6.58754 5.80977C6.69694 5.70037 6.84531 5.63892 7.00002 5.63892C7.15473 5.63892 7.3031 5.70037 7.4125 5.80977C7.52189 5.91917 7.58335 6.06754 7.58335 6.22225V10.1111C7.58134 10.2652 7.51923 10.4124 7.41027 10.5214C7.30131 10.6304 7.1541 10.6925 7.00002 10.6945Z",
fill: "currentColor"
}, null, -1);
var _hoisted_2 = [_hoisted_1];
function render(_ctx, _cache, $props, $setup, $data, $options) {
return openBlock(), createElementBlock("svg", mergeProps({
width: "14",
height: "14",
viewBox: "0 0 14 14",
fill: "none",
xmlns: "http://www.w3.org/2000/svg"
}, _ctx.pti()), _hoisted_2, 16);
}
__name(render, "render");
script.render = render;
export {
script$1 as a,
script$3 as b,
script$2 as c,
script as s
};
//# sourceMappingURL=index-DYEEBf64.js.map
1
web/assets/index-DYEEBf64.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
4
web/assets/userSelection-Duxc-t_S.js → web/assets/userSelection-DVDwxLD5.js
generated
vendored
@@ -1,6 +1,6 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { b4 as api, ca as $el } from "./index-DGAbdBYF.js";
import { bq as api, bv as $el } from "./index-CgU1oKZt.js";
function createSpinner() {
const div = document.createElement("div");
div.innerHTML = `<div class="lds-ring"><div></div><div></div><div></div><div></div></div>`;
@@ -126,4 +126,4 @@ window.comfyAPI.userSelection.UserSelectionScreen = UserSelectionScreen;
export {
UserSelectionScreen
};
//# sourceMappingURL=userSelection-Duxc-t_S.js.map
//# sourceMappingURL=userSelection-DVDwxLD5.js.map
1
web/assets/userSelection-DVDwxLD5.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1
web/assets/userSelection-Duxc-t_S.js.map
generated
vendored
File diff suppressed because one or more lines are too long
4
web/assets/widgetInputs-DdoWwzg5.js → web/assets/widgetInputs-DNVvusS1.js
generated
vendored
@@ -1,6 +1,6 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { l as LGraphNode, k as app, cf as applyTextReplacements, ce as ComfyWidgets, ci as addValueControlWidgets, z as LiteGraph } from "./index-DGAbdBYF.js";
import { e as LGraphNode, c as app, bA as applyTextReplacements, bz as ComfyWidgets, bG as addValueControlWidgets, k as LiteGraph } from "./index-CgU1oKZt.js";
const CONVERTED_TYPE = "converted-widget";
const VALID_TYPES = [
"STRING",
@@ -753,4 +753,4 @@ export {
mergeIfValid,
setWidgetConfig
};
//# sourceMappingURL=widgetInputs-DdoWwzg5.js.map
//# sourceMappingURL=widgetInputs-DNVvusS1.js.map
1
web/assets/widgetInputs-DNVvusS1.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1
web/assets/widgetInputs-DdoWwzg5.js.map
generated
vendored
File diff suppressed because one or more lines are too long
4
web/index.html
vendored
@@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
<link rel="stylesheet" type="text/css" href="user.css" />
<link rel="stylesheet" type="text/css" href="materialdesignicons.min.css" />
<script type="module" crossorigin src="./assets/index-DGAbdBYF.js"></script>
<link rel="stylesheet" crossorigin href="./assets/index-BHJGjcJh.css">
<script type="module" crossorigin src="./assets/index-CgU1oKZt.js"></script>
<link rel="stylesheet" crossorigin href="./assets/index-BDQCPKeJ.css">
</head>
<body class="litegraph grid">
<div id="vue-app"></div>