Merge branch 'comfyanonymous:master' into model-sampling-cpu

dave-juicelabs 2025-02-12 13:40:22 -06:00 committed by GitHub
commit be7e3d4d69
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
97 changed files with 108086 additions and 44069 deletions

View File

@ -12,7 +12,7 @@ on:
 description: 'CUDA version'
 required: true
 type: string
-default: "124"
+default: "126"
 python_minor:
 description: 'Python minor version'
 required: true

View File

@ -18,7 +18,7 @@ jobs:
 strategy:
 fail-fast: false
 matrix:
-python-version: ["3.9", "3.10", "3.11", "3.12"]
+python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
 steps:
 - uses: actions/checkout@v4
 - name: Set up Python ${{ matrix.python-version }}

View File

@ -18,7 +18,7 @@ jobs:
 - name: Set up Python
 uses: actions/setup-python@v4
 with:
-python-version: '3.10'
+python-version: '3.12'
 - name: Install requirements
 run: |
 python -m pip install --upgrade pip

View File

@ -17,7 +17,7 @@ on:
 description: 'cuda version'
 required: true
 type: string
-default: "124"
+default: "126"
 python_minor:
 description: 'python minor version'

View File

@ -7,7 +7,7 @@ on:
 description: 'cuda version'
 required: true
 type: string
-default: "124"
+default: "126"
 python_minor:
 description: 'python minor version'

View File

@ -15,6 +15,7 @@
 # Python web server
 /api_server/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata
 /app/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata
+/utils/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata
 # Frontend assets
 /web/ @huchenlei @webfiltered @pythongosssss @yoland68 @robinjhuang

View File

@ -47,6 +47,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
 - [AuraFlow](https://comfyanonymous.github.io/ComfyUI_examples/aura_flow/)
 - [HunyuanDiT](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_dit/)
 - [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/)
+- [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/)
 - Video Models
 - [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/)
 - [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/)
@ -130,6 +131,8 @@ Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you
 If you have trouble extracting it, right click the file -> properties -> unblock
+If you have a 50 series Blackwell card like a 5090 or 5080 see [this discussion thread](https://github.com/comfyanonymous/ComfyUI/discussions/6643)
 #### How do I share models between another UI and ComfyUI?
 See the [Config file](extra_model_paths.yaml.example) to set the search paths for models. In the standalone windows build you can find this file in the ComfyUI directory. Rename this file to extra_model_paths.yaml and edit it with your favorite text editor.
@ -140,7 +143,7 @@ To run it on services like paperspace, kaggle or colab you can use my [Jupyter N
 ## Manual Install (Windows, Linux)
-Note that some dependencies do not yet support python 3.13 so using 3.12 is recommended.
+python 3.13 is supported but using 3.12 is recommended because some custom nodes and their dependencies might not support it yet.
 Git clone this repo.
@ -152,11 +155,11 @@ Put your VAE in: models/vae
 ### AMD GPUs (Linux only)
 AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version:
-```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2```
+```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2.4```
-This is the command to install the nightly with ROCm 6.2 which might have some performance improvements:
+This is the command to install the nightly with ROCm 6.3 which might have some performance improvements:
-```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.2.4```
+```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm6.3```
 ### Intel GPUs (Windows and Linux)
@ -186,7 +189,7 @@ Additional discussion and help can be found [here](https://github.com/comfyanony
 Nvidia users should install stable pytorch using this command:
-```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu124```
+```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu126```
 This is the command to install pytorch nightly instead which might have performance improvements:

View File

@ -1,7 +1,6 @@
 from aiohttp import web
 from typing import Optional
-from folder_paths import models_dir, user_directory, output_directory, folder_names_and_paths
-from api_server.services.file_service import FileService
+from folder_paths import folder_names_and_paths
 from api_server.services.terminal_service import TerminalService
 import app.logger
@ -15,26 +14,10 @@ class InternalRoutes:
     def __init__(self, prompt_server):
         self.routes: web.RouteTableDef = web.RouteTableDef()
         self._app: Optional[web.Application] = None
-        self.file_service = FileService({
-            "models": models_dir,
-            "user": user_directory,
-            "output": output_directory
-        })
         self.prompt_server = prompt_server
         self.terminal_service = TerminalService(prompt_server)

     def setup_routes(self):
-        @self.routes.get('/files')
-        async def list_files(request):
-            directory_key = request.query.get('directory', '')
-            try:
-                file_list = self.file_service.list_files(directory_key)
-                return web.json_response({"files": file_list})
-            except ValueError as e:
-                return web.json_response({"error": str(e)}, status=400)
-            except Exception as e:
-                return web.json_response({"error": str(e)}, status=500)
         @self.routes.get('/logs')
         async def get_logs(request):
             return web.json_response("".join([(l["t"] + " - " + l["m"]) for l in app.logger.get_logs()]))

View File

@ -1,13 +0,0 @@
-from typing import Dict, List, Optional
-from api_server.utils.file_operations import FileSystemOperations, FileSystemItem
-
-class FileService:
-    def __init__(self, allowed_directories: Dict[str, str], file_system_ops: Optional[FileSystemOperations] = None):
-        self.allowed_directories: Dict[str, str] = allowed_directories
-        self.file_system_ops: FileSystemOperations = file_system_ops or FileSystemOperations()
-
-    def list_files(self, directory_key: str) -> List[FileSystemItem]:
-        if directory_key not in self.allowed_directories:
-            raise ValueError("Invalid directory key")
-        directory_path: str = self.allowed_directories[directory_key]
-        return self.file_system_ops.walk_directory(directory_path)

View File

@ -4,12 +4,93 @@ import os
import folder_paths
import glob
from aiohttp import web
import json
import logging
from functools import lru_cache
from utils.json_util import merge_json_recursive
# Extra locale files to load into main.json
EXTRA_LOCALE_FILES = [
"nodeDefs.json",
"commands.json",
"settings.json",
]
def safe_load_json_file(file_path: str) -> dict:
if not os.path.exists(file_path):
return {}
try:
with open(file_path, "r", encoding="utf-8") as f:
return json.load(f)
except json.JSONDecodeError:
logging.error(f"Error loading {file_path}")
return {}
 class CustomNodeManager:
-    """
-    Placeholder to refactor the custom node management features from ComfyUI-Manager.
-    Currently it only contains the custom workflow templates feature.
-    """
+    @lru_cache(maxsize=1)
+    def build_translations(self):
+        """Load all custom nodes translations during initialization. Translations are
+        expected to be loaded from `locales/` folder.
The folder structure is expected to be the following:
- custom_nodes/
- custom_node_1/
- locales/
- en/
- main.json
- commands.json
- settings.json
returned translations are expected to be in the following format:
{
"en": {
"nodeDefs": {...},
"commands": {...},
"settings": {...},
...{other main.json keys}
}
}
"""
translations = {}
for folder in folder_paths.get_folder_paths("custom_nodes"):
# Sort glob results for deterministic ordering
for custom_node_dir in sorted(glob.glob(os.path.join(folder, "*/"))):
locales_dir = os.path.join(custom_node_dir, "locales")
if not os.path.exists(locales_dir):
continue
for lang_dir in glob.glob(os.path.join(locales_dir, "*/")):
lang_code = os.path.basename(os.path.dirname(lang_dir))
if lang_code not in translations:
translations[lang_code] = {}
# Load main.json
main_file = os.path.join(lang_dir, "main.json")
node_translations = safe_load_json_file(main_file)
# Load extra locale files
for extra_file in EXTRA_LOCALE_FILES:
extra_file_path = os.path.join(lang_dir, extra_file)
key = extra_file.split(".")[0]
json_data = safe_load_json_file(extra_file_path)
if json_data:
node_translations[key] = json_data
if node_translations:
translations[lang_code] = merge_json_recursive(
translations[lang_code], node_translations
)
return translations
     def add_routes(self, routes, webapp, loadedModules):
         @routes.get("/workflow_templates")
@ -18,17 +99,36 @@ class CustomNodeManager:
             files = [
                 file
                 for folder in folder_paths.get_folder_paths("custom_nodes")
-                for file in glob.glob(os.path.join(folder, '*/example_workflows/*.json'))
+                for file in glob.glob(
+                    os.path.join(folder, "*/example_workflows/*.json")
+                )
             ]
-            workflow_templates_dict = {} # custom_nodes folder name -> example workflow names
+            workflow_templates_dict = (
+                {}
+            ) # custom_nodes folder name -> example workflow names
             for file in files:
-                custom_nodes_name = os.path.basename(os.path.dirname(os.path.dirname(file)))
+                custom_nodes_name = os.path.basename(
+                    os.path.dirname(os.path.dirname(file))
+                )
                 workflow_name = os.path.splitext(os.path.basename(file))[0]
-                workflow_templates_dict.setdefault(custom_nodes_name, []).append(workflow_name)
+                workflow_templates_dict.setdefault(custom_nodes_name, []).append(
+                    workflow_name
+                )
             return web.json_response(workflow_templates_dict)

         # Serve workflow templates from custom nodes.
         for module_name, module_dir in loadedModules:
-            workflows_dir = os.path.join(module_dir, 'example_workflows')
+            workflows_dir = os.path.join(module_dir, "example_workflows")
             if os.path.exists(workflows_dir):
-                webapp.add_routes([web.static('/api/workflow_templates/' + module_name, workflows_dir)])
+                webapp.add_routes(
+                    [
+                        web.static(
+                            "/api/workflow_templates/" + module_name, workflows_dir
+                        )
+                    ]
+                )
@routes.get("/i18n")
async def get_i18n(request):
"""Returns translations from all custom nodes' locales folders."""
return web.json_response(self.build_translations())
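
A note on the `merge_json_recursive` helper imported above: the diff does not show its body, so here is a minimal sketch of the recursive dictionary merge it is assumed to perform (the real implementation lives in `utils/json_util.py` and may differ):

```python
# Hypothetical stand-in for utils.json_util.merge_json_recursive, shown only to
# illustrate how per-language translation dicts from several custom nodes can be
# combined into one tree; the repo's actual implementation may differ.
def merge_json_recursive(base: dict, update: dict) -> dict:
    merged = dict(base)
    for key, value in update.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_json_recursive(merged[key], value)  # merge nested objects
        else:
            merged[key] = value  # non-dict values from the later file win
    return merged

# Example: two custom nodes both contributing "en" translations.
a = {"nodeDefs": {"NodeA": {"display_name": "A"}}}
b = {"nodeDefs": {"NodeB": {"display_name": "B"}}, "settings": {"foo": "bar"}}
print(merge_json_recursive(a, b))
# {'nodeDefs': {'NodeA': {'display_name': 'A'}, 'NodeB': {'display_name': 'B'}}, 'settings': {'foo': 'bar'}}
```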

View File

@ -43,10 +43,11 @@ parser.add_argument("--tls-certfile", type=str, help="Path to TLS (SSL) certific
 parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORIGIN", nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.")
 parser.add_argument("--max-upload-size", type=float, default=100, help="Set the maximum upload size in MB.")
+parser.add_argument("--base-directory", type=str, default=None, help="Set the ComfyUI base directory for models, custom_nodes, input, output, temp, and user directories.")
 parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.")
-parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
+parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory. Overrides --base-directory.")
-parser.add_argument("--temp-directory", type=str, default=None, help="Set the ComfyUI temp directory (default is in the ComfyUI directory).")
+parser.add_argument("--temp-directory", type=str, default=None, help="Set the ComfyUI temp directory (default is in the ComfyUI directory). Overrides --base-directory.")
-parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory.")
+parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory. Overrides --base-directory.")
 parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.")
 parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
 parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
@ -177,7 +178,9 @@ parser.add_argument(
     help="The local filesystem path to the directory where the frontend is located. Overrides --front-end-version.",
 )
-parser.add_argument("--user-directory", type=is_valid_directory, default=None, help="Set the ComfyUI user directory with an absolute path.")
+parser.add_argument("--user-directory", type=is_valid_directory, default=None, help="Set the ComfyUI user directory with an absolute path. Overrides --base-directory.")
+parser.add_argument("--enable-compress-response-body", action="store_true", help="Enable compressing response body.")

 if comfy.options.args_parsing:
     args = parser.parse_args()
@ -189,3 +192,6 @@ if args.windows_standalone_build:
 if args.disable_auto_launch:
     args.auto_launch = False
+
+if args.force_fp16:
+    args.fp16_unet = True

View File

@ -102,9 +102,10 @@ class CLIPTextModel_(torch.nn.Module):
         mask = None
         if attention_mask is not None:
             mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
-            mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))
+            mask = mask.masked_fill(mask.to(torch.bool), -torch.finfo(x.dtype).max)

-        causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1)
+        causal_mask = torch.full((x.shape[1], x.shape[1]), -torch.finfo(x.dtype).max, dtype=x.dtype, device=x.device).triu_(1)

         if mask is not None:
             mask += causal_mask
         else:
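
Why the switch away from `float("-inf")` matters here: a small self-contained sketch (not ComfyUI code) showing that a fully masked attention row stays finite when masked with the most negative representable value, while an all `-inf` row turns into NaN after softmax.

```python
import torch

scores = torch.zeros(1, 4)  # one attention row, every position masked below

inf_masked = scores + float("-inf")
finite_masked = scores - torch.finfo(scores.dtype).max

# softmax subtracts the row max internally: -inf - (-inf) is NaN,
# while equal finite values give a uniform, finite distribution.
print(torch.softmax(inf_masked, dim=-1))     # tensor([[nan, nan, nan, nan]])
print(torch.softmax(finite_masked, dim=-1))  # tensor([[0.2500, 0.2500, 0.2500, 0.2500]])
```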

View File

@ -3,9 +3,6 @@ import math
 import comfy.utils

-def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9)
-    return abs(a*b) // math.gcd(a, b)
-
 class CONDRegular:
     def __init__(self, cond):
         self.cond = cond
@ -46,7 +43,7 @@ class CONDCrossAttn(CONDRegular):
             if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen
                 return False
-            mult_min = lcm(s1[1], s2[1])
+            mult_min = math.lcm(s1[1], s2[1])
             diff = mult_min // min(s1[1], s2[1])
             if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much
                 return False
@ -57,7 +54,7 @@
         crossattn_max_len = self.cond.shape[1]
         for x in others:
             c = x.cond
-            crossattn_max_len = lcm(crossattn_max_len, c.shape[1])
+            crossattn_max_len = math.lcm(crossattn_max_len, c.shape[1])
             conds.append(c)
         out = []

View File

@ -4,105 +4,6 @@ import logging
# conversion code from https://github.com/huggingface/diffusers/blob/main/scripts/convert_diffusers_to_original_stable_diffusion.py
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
sd_down_res_prefix = f"input_blocks.{3 * i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
sd_down_atn_prefix = f"input_blocks.{3 * i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
sd_up_res_prefix = f"output_blocks.{3 * i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
sd_up_atn_prefix = f"output_blocks.{3 * i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
sd_downsample_prefix = f"input_blocks.{3 * (i + 1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
sd_upsample_prefix = f"output_blocks.{3 * i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
hf_mid_res_prefix = f"mid_block.resnets.{j}."
sd_mid_res_prefix = f"middle_block.{2 * j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
mapping = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
mapping[hf_name] = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
v = v.replace(hf_part, sd_part)
mapping[k] = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
v = v.replace(hf_part, sd_part)
mapping[k] = v
new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
@ -213,6 +114,7 @@ textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}

# This function exists because at the time of writing torch.cat can't do fp8 with cuda
def cat_tensors(tensors):
    x = 0
@ -229,6 +131,7 @@ def cat_tensors(tensors):
    return out

def convert_text_enc_state_dict_v20(text_enc_dict, prefix=""):
    new_state_dict = {}
    capture_qkv_weight = {}
@ -284,5 +187,3 @@ def convert_text_enc_state_dict_v20(text_enc_dict, prefix=""):
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict

View File

@ -661,7 +661,7 @@ class UniPC:
         if x_t is None:
             if use_predictor:
-                pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
+                pred_res = torch.tensordot(D1s, rhos_p, dims=([1], [0])) # torch.einsum('k,bkchw->bchw', rhos_p, D1s)
             else:
                 pred_res = 0
             x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * pred_res
@ -669,7 +669,7 @@
         if use_corrector:
             model_t = self.model_fn(x_t, t)
             if D1s is not None:
-                corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
+                corr_res = torch.tensordot(D1s, rhos_c[:-1], dims=([1], [0])) # torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
             else:
                 corr_res = 0
             D1_t = (model_t - model_prev_0)
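
For reference, a small standalone check (with made-up shapes) that the `tensordot` form used above computes the same contraction as the original `einsum`:

```python
import torch

rhos = torch.randn(3)              # k
D1s = torch.randn(2, 3, 4, 8, 8)   # b, k, c, h, w

out_einsum = torch.einsum('k,bkchw->bchw', rhos, D1s)
out_tensordot = torch.tensordot(D1s, rhos, dims=([1], [0]))  # contract dim 1 of D1s with dim 0 of rhos

print(out_tensordot.shape)                                   # torch.Size([2, 4, 8, 8])
print(torch.allclose(out_einsum, out_tensordot, atol=1e-6))  # True
```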

View File

@ -40,7 +40,7 @@ def get_sigmas_polyexponential(n, sigma_min, sigma_max, rho=1., device='cpu'):
 def get_sigmas_vp(n, beta_d=19.9, beta_min=0.1, eps_s=1e-3, device='cpu'):
     """Constructs a continuous VP noise schedule."""
     t = torch.linspace(1, eps_s, n, device=device)
-    sigmas = torch.sqrt(torch.exp(beta_d * t ** 2 / 2 + beta_min * t) - 1)
+    sigmas = torch.sqrt(torch.special.expm1(beta_d * t ** 2 / 2 + beta_min * t))
     return append_zero(sigmas)
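
The `expm1` substitution above is a numerical-precision fix rather than a behaviour change; a quick illustration:

```python
import torch

# expm1(x) evaluates exp(x) - 1 accurately even when x is tiny, where the
# naive form loses all significant digits to cancellation.
x = torch.tensor([1e-8])
print(torch.exp(x) - 1)        # tensor([0.])  (1 + 1e-8 rounds to 1 in float32)
print(torch.special.expm1(x))  # tensor([1.0000e-08])
```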
@ -1267,7 +1267,7 @@ def sample_dpmpp_2m_cfg_pp(model, x, sigmas, extra_args=None, callback=None, dis
     return x

 @torch.no_grad()
-def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None, cfg_pp=False):
+def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., noise_sampler=None, eta=1., cfg_pp=False):
     extra_args = {} if extra_args is None else extra_args
     seed = extra_args.get("seed", None)
     noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
@ -1289,50 +1289,80 @@ def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None
         extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

     for i in trange(len(sigmas) - 1, disable=disable):
-        if s_churn > 0:
-            gamma = min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
-            sigma_hat = sigmas[i] * (gamma + 1)
-        else:
-            gamma = 0
-            sigma_hat = sigmas[i]
-
-        if gamma > 0:
-            eps = torch.randn_like(x) * s_noise
-            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
-        denoised = model(x, sigma_hat * s_in, **extra_args)
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
         if callback is not None:
-            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
-        if sigmas[i + 1] == 0 or old_denoised is None:
+        if sigma_down == 0 or old_denoised is None:
             # Euler method
             if cfg_pp:
-                d = to_d(x, sigma_hat, uncond_denoised)
-                x = denoised + d * sigmas[i + 1]
+                d = to_d(x, sigmas[i], uncond_denoised)
+                x = denoised + d * sigma_down
             else:
-                d = to_d(x, sigma_hat, denoised)
-                dt = sigmas[i + 1] - sigma_hat
+                d = to_d(x, sigmas[i], denoised)
+                dt = sigma_down - sigmas[i]
                 x = x + d * dt
         else:
             # Second order multistep method in https://arxiv.org/pdf/2308.02157
-            t, t_next, t_prev = t_fn(sigmas[i]), t_fn(sigmas[i + 1]), t_fn(sigmas[i - 1])
+            t, t_next, t_prev = t_fn(sigmas[i]), t_fn(sigma_down), t_fn(sigmas[i - 1])
             h = t_next - t
             c2 = (t_prev - t) / h

             phi1_val, phi2_val = phi1_fn(-h), phi2_fn(-h)
-            b1 = torch.nan_to_num(phi1_val - 1.0 / c2 * phi2_val, nan=0.0)
-            b2 = torch.nan_to_num(1.0 / c2 * phi2_val, nan=0.0)
+            b1 = torch.nan_to_num(phi1_val - phi2_val / c2, nan=0.0)
+            b2 = torch.nan_to_num(phi2_val / c2, nan=0.0)

             if cfg_pp:
                 x = x + (denoised - uncond_denoised)
-            x = (sigma_fn(t_next) / sigma_fn(t)) * x + h * (b1 * denoised + b2 * old_denoised)
+                x = sigma_fn(h) * x + h * (b1 * uncond_denoised + b2 * old_denoised)
+            else:
+                x = sigma_fn(h) * x + h * (b1 * denoised + b2 * old_denoised)

+        # Noise addition
+        if sigmas[i + 1] > 0:
+            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
+
-        old_denoised = denoised
+        if cfg_pp:
+            old_denoised = uncond_denoised
+        else:
+            old_denoised = denoised
     return x

 @torch.no_grad()
-def sample_res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None):
-    return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise, noise_sampler=noise_sampler, cfg_pp=False)
+def sample_res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., noise_sampler=None):
+    return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=0., cfg_pp=False)

 @torch.no_grad()
-def sample_res_multistep_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None):
-    return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise, noise_sampler=noise_sampler, cfg_pp=True)
+def sample_res_multistep_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., noise_sampler=None):
+    return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=0., cfg_pp=True)
@torch.no_grad()
def sample_res_multistep_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=eta, cfg_pp=False)
@torch.no_grad()
def sample_res_multistep_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=eta, cfg_pp=True)
@torch.no_grad()
def sample_gradient_estimation(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2.):
"""Gradient-estimation sampler. Paper: https://openreview.net/pdf?id=o2ND9v0CeK"""
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
old_d = None
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
d = to_d(x, sigmas[i], denoised)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
dt = sigmas[i + 1] - sigmas[i]
if i == 0:
# Euler method
x = x + d * dt
else:
# Gradient estimation
d_bar = ge_gamma * d + (1 - ge_gamma) * old_d
x = x + d_bar * dt
old_d = d
return x
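
Context for the new `eta` parameter in `res_multistep`: `get_ancestral_step` splits each sigma step into a deterministic part (down to `sigma_down`) and noise that is re-injected afterwards (`sigma_up`). A rough sketch of that split, roughly following k-diffusion's usual formulation (the helper in this repo may differ in details):

```python
import math

def get_ancestral_step_sketch(sigma_from: float, sigma_to: float, eta: float = 1.0):
    """Illustrative only: split a sigma step into deterministic and noisy parts."""
    if eta == 0.0:
        return sigma_to, 0.0  # eta=0 reduces to the plain deterministic step
    sigma_up = min(sigma_to, eta * math.sqrt(sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2))
    sigma_down = math.sqrt(sigma_to**2 - sigma_up**2)
    return sigma_down, sigma_up

print(get_ancestral_step_sketch(10.0, 5.0, eta=1.0))  # deterministic target plus the noise to add back
print(get_ancestral_step_sketch(10.0, 5.0, eta=0.0))  # (5.0, 0.0): what sample_res_multistep uses
```

With `eta=0.` the new samplers reproduce the old non-ancestral behaviour, which is why `sample_res_multistep` and `sample_res_multistep_cfg_pp` pass it explicitly while the `_ancestral` variants default to `eta=1.`.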

View File

@ -22,7 +22,7 @@ def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None) -> Tensor:
 def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
     assert dim % 2 == 0
-    if comfy.model_management.is_device_mps(pos.device) or comfy.model_management.is_intel_xpu():
+    if comfy.model_management.is_device_mps(pos.device) or comfy.model_management.is_intel_xpu() or comfy.model_management.is_directml_enabled():
         device = torch.device("cpu")
     else:
         device = pos.device

View File

@ -109,9 +109,8 @@ class Flux(nn.Module):
         img = self.img_in(img)
         vec = self.time_in(timestep_embedding(timesteps, 256).to(img.dtype))
         if self.params.guidance_embed:
-            if guidance is None:
-                raise ValueError("Didn't get guidance strength for guidance distilled model.")
-            vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype))
+            if guidance is not None:
+                vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype))

         vec = vec + self.vector_in(y[:,:self.params.vec_in_dim])
         txt = self.txt_in(txt)
@ -186,7 +185,7 @@ class Flux(nn.Module):
         img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels)
         return img

-    def forward(self, x, timestep, context, y, guidance, control=None, transformer_options={}, **kwargs):
+    def forward(self, x, timestep, context, y, guidance=None, control=None, transformer_options={}, **kwargs):
         bs, c, h, w = x.shape
         patch_size = self.patch_size
         x = comfy.ldm.common_dit.pad_to_patch_size(x, (patch_size, patch_size))

View File

@ -240,9 +240,8 @@ class HunyuanVideo(nn.Module):
         vec = vec + self.vector_in(y[:, :self.params.vec_in_dim])

         if self.params.guidance_embed:
-            if guidance is None:
-                raise ValueError("Didn't get guidance strength for guidance distilled model.")
-            vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype))
+            if guidance is not None:
+                vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype))

         if txt_mask is not None and not torch.is_floating_point(txt_mask):
             txt_mask = (txt_mask - 1).to(img.dtype) * torch.finfo(img.dtype).max
@ -314,7 +313,7 @@ class HunyuanVideo(nn.Module):
         img = img.reshape(initial_shape)
         return img

-    def forward(self, x, timestep, context, y, guidance, attention_mask=None, control=None, transformer_options={}, **kwargs):
+    def forward(self, x, timestep, context, y, guidance=None, attention_mask=None, control=None, transformer_options={}, **kwargs):
         bs, c, t, h, w = x.shape
         patch_size = self.patch_size
         t_len = ((t + (patch_size[0] // 2)) // patch_size[0])

comfy/ldm/lumina/model.py (new file, 622 lines)
View File

@ -0,0 +1,622 @@
# Code from: https://github.com/Alpha-VLLM/Lumina-Image-2.0/blob/main/models/model.py
from __future__ import annotations
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import comfy.ldm.common_dit
from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder, RMSNorm
from comfy.ldm.modules.attention import optimized_attention_masked
from comfy.ldm.flux.layers import EmbedND
def modulate(x, scale):
return x * (1 + scale.unsqueeze(1))
#############################################################################
# Core NextDiT Model #
#############################################################################
class JointAttention(nn.Module):
"""Multi-head attention module."""
def __init__(
self,
dim: int,
n_heads: int,
n_kv_heads: Optional[int],
qk_norm: bool,
operation_settings={},
):
"""
Initialize the Attention module.
Args:
dim (int): Number of input dimensions.
n_heads (int): Number of heads.
n_kv_heads (Optional[int]): Number of kv heads, if using GQA.
"""
super().__init__()
self.n_kv_heads = n_heads if n_kv_heads is None else n_kv_heads
self.n_local_heads = n_heads
self.n_local_kv_heads = self.n_kv_heads
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = dim // n_heads
self.qkv = operation_settings.get("operations").Linear(
dim,
(n_heads + self.n_kv_heads + self.n_kv_heads) * self.head_dim,
bias=False,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.out = operation_settings.get("operations").Linear(
n_heads * self.head_dim,
dim,
bias=False,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
if qk_norm:
self.q_norm = RMSNorm(self.head_dim, elementwise_affine=True, **operation_settings)
self.k_norm = RMSNorm(self.head_dim, elementwise_affine=True, **operation_settings)
else:
self.q_norm = self.k_norm = nn.Identity()
@staticmethod
def apply_rotary_emb(
x_in: torch.Tensor,
freqs_cis: torch.Tensor,
) -> torch.Tensor:
"""
Apply rotary embeddings to input tensors using the given frequency
tensor.
This function applies rotary embeddings to the given query 'xq' and
key 'xk' tensors using the provided frequency tensor 'freqs_cis'. The
input tensors are reshaped as complex numbers, and the frequency tensor
is reshaped for broadcasting compatibility. The resulting tensors
contain rotary embeddings and are returned as real tensors.
Args:
x_in (torch.Tensor): Query or Key tensor to apply rotary embeddings.
freqs_cis (torch.Tensor): Precomputed frequency tensor for complex
exponentials.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor
and key tensor with rotary embeddings.
"""
t_ = x_in.reshape(*x_in.shape[:-1], -1, 1, 2)
t_out = freqs_cis[..., 0] * t_[..., 0] + freqs_cis[..., 1] * t_[..., 1]
return t_out.reshape(*x_in.shape)
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
freqs_cis: torch.Tensor,
) -> torch.Tensor:
"""
Args:
x:
x_mask:
freqs_cis:
Returns:
"""
bsz, seqlen, _ = x.shape
xq, xk, xv = torch.split(
self.qkv(x),
[
self.n_local_heads * self.head_dim,
self.n_local_kv_heads * self.head_dim,
self.n_local_kv_heads * self.head_dim,
],
dim=-1,
)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xq = self.q_norm(xq)
xk = self.k_norm(xk)
xq = JointAttention.apply_rotary_emb(xq, freqs_cis=freqs_cis)
xk = JointAttention.apply_rotary_emb(xk, freqs_cis=freqs_cis)
n_rep = self.n_local_heads // self.n_local_kv_heads
if n_rep >= 1:
xk = xk.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3)
xv = xv.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3)
output = optimized_attention_masked(xq.movedim(1, 2), xk.movedim(1, 2), xv.movedim(1, 2), self.n_local_heads, x_mask, skip_reshape=True)
return self.out(output)
class FeedForward(nn.Module):
def __init__(
self,
dim: int,
hidden_dim: int,
multiple_of: int,
ffn_dim_multiplier: Optional[float],
operation_settings={},
):
"""
Initialize the FeedForward module.
Args:
dim (int): Input dimension.
hidden_dim (int): Hidden dimension of the feedforward layer.
multiple_of (int): Value to ensure hidden dimension is a multiple
of this value.
ffn_dim_multiplier (float, optional): Custom multiplier for hidden
dimension. Defaults to None.
"""
super().__init__()
# custom dim factor multiplier
if ffn_dim_multiplier is not None:
hidden_dim = int(ffn_dim_multiplier * hidden_dim)
hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
self.w1 = operation_settings.get("operations").Linear(
dim,
hidden_dim,
bias=False,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.w2 = operation_settings.get("operations").Linear(
hidden_dim,
dim,
bias=False,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.w3 = operation_settings.get("operations").Linear(
dim,
hidden_dim,
bias=False,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
# @torch.compile
def _forward_silu_gating(self, x1, x3):
return F.silu(x1) * x3
def forward(self, x):
return self.w2(self._forward_silu_gating(self.w1(x), self.w3(x)))
class JointTransformerBlock(nn.Module):
def __init__(
self,
layer_id: int,
dim: int,
n_heads: int,
n_kv_heads: int,
multiple_of: int,
ffn_dim_multiplier: float,
norm_eps: float,
qk_norm: bool,
modulation=True,
operation_settings={},
) -> None:
"""
Initialize a TransformerBlock.
Args:
layer_id (int): Identifier for the layer.
dim (int): Embedding dimension of the input features.
n_heads (int): Number of attention heads.
n_kv_heads (Optional[int]): Number of attention heads in key and
value features (if using GQA), or set to None for the same as
query.
multiple_of (int):
ffn_dim_multiplier (float):
norm_eps (float):
"""
super().__init__()
self.dim = dim
self.head_dim = dim // n_heads
self.attention = JointAttention(dim, n_heads, n_kv_heads, qk_norm, operation_settings=operation_settings)
self.feed_forward = FeedForward(
dim=dim,
hidden_dim=4 * dim,
multiple_of=multiple_of,
ffn_dim_multiplier=ffn_dim_multiplier,
operation_settings=operation_settings,
)
self.layer_id = layer_id
self.attention_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
self.ffn_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
self.attention_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
self.ffn_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
self.modulation = modulation
if modulation:
self.adaLN_modulation = nn.Sequential(
nn.SiLU(),
operation_settings.get("operations").Linear(
min(dim, 1024),
4 * dim,
bias=True,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
),
)
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
freqs_cis: torch.Tensor,
adaln_input: Optional[torch.Tensor]=None,
):
"""
Perform a forward pass through the TransformerBlock.
Args:
x (torch.Tensor): Input tensor.
freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.
Returns:
torch.Tensor: Output tensor after applying attention and
feedforward layers.
"""
if self.modulation:
assert adaln_input is not None
scale_msa, gate_msa, scale_mlp, gate_mlp = self.adaLN_modulation(adaln_input).chunk(4, dim=1)
x = x + gate_msa.unsqueeze(1).tanh() * self.attention_norm2(
self.attention(
modulate(self.attention_norm1(x), scale_msa),
x_mask,
freqs_cis,
)
)
x = x + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(
self.feed_forward(
modulate(self.ffn_norm1(x), scale_mlp),
)
)
else:
assert adaln_input is None
x = x + self.attention_norm2(
self.attention(
self.attention_norm1(x),
x_mask,
freqs_cis,
)
)
x = x + self.ffn_norm2(
self.feed_forward(
self.ffn_norm1(x),
)
)
return x
class FinalLayer(nn.Module):
"""
The final layer of NextDiT.
"""
def __init__(self, hidden_size, patch_size, out_channels, operation_settings={}):
super().__init__()
self.norm_final = operation_settings.get("operations").LayerNorm(
hidden_size,
elementwise_affine=False,
eps=1e-6,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.linear = operation_settings.get("operations").Linear(
hidden_size,
patch_size * patch_size * out_channels,
bias=True,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.adaLN_modulation = nn.Sequential(
nn.SiLU(),
operation_settings.get("operations").Linear(
min(hidden_size, 1024),
hidden_size,
bias=True,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
),
)
def forward(self, x, c):
scale = self.adaLN_modulation(c)
x = modulate(self.norm_final(x), scale)
x = self.linear(x)
return x
class NextDiT(nn.Module):
"""
Diffusion model with a Transformer backbone.
"""
def __init__(
self,
patch_size: int = 2,
in_channels: int = 4,
dim: int = 4096,
n_layers: int = 32,
n_refiner_layers: int = 2,
n_heads: int = 32,
n_kv_heads: Optional[int] = None,
multiple_of: int = 256,
ffn_dim_multiplier: Optional[float] = None,
norm_eps: float = 1e-5,
qk_norm: bool = False,
cap_feat_dim: int = 5120,
axes_dims: List[int] = (16, 56, 56),
axes_lens: List[int] = (1, 512, 512),
image_model=None,
device=None,
dtype=None,
operations=None,
) -> None:
super().__init__()
self.dtype = dtype
operation_settings = {"operations": operations, "device": device, "dtype": dtype}
self.in_channels = in_channels
self.out_channels = in_channels
self.patch_size = patch_size
self.x_embedder = operation_settings.get("operations").Linear(
in_features=patch_size * patch_size * in_channels,
out_features=dim,
bias=True,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.noise_refiner = nn.ModuleList(
[
JointTransformerBlock(
layer_id,
dim,
n_heads,
n_kv_heads,
multiple_of,
ffn_dim_multiplier,
norm_eps,
qk_norm,
modulation=True,
operation_settings=operation_settings,
)
for layer_id in range(n_refiner_layers)
]
)
self.context_refiner = nn.ModuleList(
[
JointTransformerBlock(
layer_id,
dim,
n_heads,
n_kv_heads,
multiple_of,
ffn_dim_multiplier,
norm_eps,
qk_norm,
modulation=False,
operation_settings=operation_settings,
)
for layer_id in range(n_refiner_layers)
]
)
self.t_embedder = TimestepEmbedder(min(dim, 1024), **operation_settings)
self.cap_embedder = nn.Sequential(
RMSNorm(cap_feat_dim, eps=norm_eps, elementwise_affine=True, **operation_settings),
operation_settings.get("operations").Linear(
cap_feat_dim,
dim,
bias=True,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
),
)
self.layers = nn.ModuleList(
[
JointTransformerBlock(
layer_id,
dim,
n_heads,
n_kv_heads,
multiple_of,
ffn_dim_multiplier,
norm_eps,
qk_norm,
operation_settings=operation_settings,
)
for layer_id in range(n_layers)
]
)
self.norm_final = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
self.final_layer = FinalLayer(dim, patch_size, self.out_channels, operation_settings=operation_settings)
assert (dim // n_heads) == sum(axes_dims)
self.axes_dims = axes_dims
self.axes_lens = axes_lens
self.rope_embedder = EmbedND(dim=dim // n_heads, theta=10000.0, axes_dim=axes_dims)
self.dim = dim
self.n_heads = n_heads
def unpatchify(
self, x: torch.Tensor, img_size: List[Tuple[int, int]], cap_size: List[int], return_tensor=False
) -> List[torch.Tensor]:
"""
x: (N, T, patch_size**2 * C)
imgs: (N, H, W, C)
"""
pH = pW = self.patch_size
imgs = []
for i in range(x.size(0)):
H, W = img_size[i]
begin = cap_size[i]
end = begin + (H // pH) * (W // pW)
imgs.append(
x[i][begin:end]
.view(H // pH, W // pW, pH, pW, self.out_channels)
.permute(4, 0, 2, 1, 3)
.flatten(3, 4)
.flatten(1, 2)
)
if return_tensor:
imgs = torch.stack(imgs, dim=0)
return imgs
def patchify_and_embed(
self, x: List[torch.Tensor] | torch.Tensor, cap_feats: torch.Tensor, cap_mask: torch.Tensor, t: torch.Tensor, num_tokens
) -> Tuple[torch.Tensor, torch.Tensor, List[Tuple[int, int]], List[int], torch.Tensor]:
bsz = len(x)
pH = pW = self.patch_size
device = x[0].device
dtype = x[0].dtype
if cap_mask is not None:
l_effective_cap_len = cap_mask.sum(dim=1).tolist()
else:
l_effective_cap_len = [num_tokens] * bsz
if cap_mask is not None and not torch.is_floating_point(cap_mask):
cap_mask = (cap_mask - 1).to(dtype) * torch.finfo(dtype).max
img_sizes = [(img.size(1), img.size(2)) for img in x]
l_effective_img_len = [(H // pH) * (W // pW) for (H, W) in img_sizes]
max_seq_len = max(
(cap_len+img_len for cap_len, img_len in zip(l_effective_cap_len, l_effective_img_len))
)
max_cap_len = max(l_effective_cap_len)
max_img_len = max(l_effective_img_len)
position_ids = torch.zeros(bsz, max_seq_len, 3, dtype=torch.int32, device=device)
for i in range(bsz):
cap_len = l_effective_cap_len[i]
img_len = l_effective_img_len[i]
H, W = img_sizes[i]
H_tokens, W_tokens = H // pH, W // pW
assert H_tokens * W_tokens == img_len
position_ids[i, :cap_len, 0] = torch.arange(cap_len, dtype=torch.int32, device=device)
position_ids[i, cap_len:cap_len+img_len, 0] = cap_len
row_ids = torch.arange(H_tokens, dtype=torch.int32, device=device).view(-1, 1).repeat(1, W_tokens).flatten()
col_ids = torch.arange(W_tokens, dtype=torch.int32, device=device).view(1, -1).repeat(H_tokens, 1).flatten()
position_ids[i, cap_len:cap_len+img_len, 1] = row_ids
position_ids[i, cap_len:cap_len+img_len, 2] = col_ids
freqs_cis = self.rope_embedder(position_ids).movedim(1, 2).to(dtype)
# build freqs_cis for cap and image individually
cap_freqs_cis_shape = list(freqs_cis.shape)
# cap_freqs_cis_shape[1] = max_cap_len
cap_freqs_cis_shape[1] = cap_feats.shape[1]
cap_freqs_cis = torch.zeros(*cap_freqs_cis_shape, device=device, dtype=freqs_cis.dtype)
img_freqs_cis_shape = list(freqs_cis.shape)
img_freqs_cis_shape[1] = max_img_len
img_freqs_cis = torch.zeros(*img_freqs_cis_shape, device=device, dtype=freqs_cis.dtype)
for i in range(bsz):
cap_len = l_effective_cap_len[i]
img_len = l_effective_img_len[i]
cap_freqs_cis[i, :cap_len] = freqs_cis[i, :cap_len]
img_freqs_cis[i, :img_len] = freqs_cis[i, cap_len:cap_len+img_len]
# refine context
for layer in self.context_refiner:
cap_feats = layer(cap_feats, cap_mask, cap_freqs_cis)
# refine image
flat_x = []
for i in range(bsz):
img = x[i]
C, H, W = img.size()
img = img.view(C, H // pH, pH, W // pW, pW).permute(1, 3, 2, 4, 0).flatten(2).flatten(0, 1)
flat_x.append(img)
x = flat_x
padded_img_embed = torch.zeros(bsz, max_img_len, x[0].shape[-1], device=device, dtype=x[0].dtype)
padded_img_mask = torch.zeros(bsz, max_img_len, dtype=dtype, device=device)
for i in range(bsz):
padded_img_embed[i, :l_effective_img_len[i]] = x[i]
padded_img_mask[i, l_effective_img_len[i]:] = -torch.finfo(dtype).max
padded_img_embed = self.x_embedder(padded_img_embed)
padded_img_mask = padded_img_mask.unsqueeze(1)
for layer in self.noise_refiner:
padded_img_embed = layer(padded_img_embed, padded_img_mask, img_freqs_cis, t)
if cap_mask is not None:
mask = torch.zeros(bsz, max_seq_len, dtype=dtype, device=device)
mask[:, :max_cap_len] = cap_mask[:, :max_cap_len]
else:
mask = None
padded_full_embed = torch.zeros(bsz, max_seq_len, self.dim, device=device, dtype=x[0].dtype)
for i in range(bsz):
cap_len = l_effective_cap_len[i]
img_len = l_effective_img_len[i]
padded_full_embed[i, :cap_len] = cap_feats[i, :cap_len]
padded_full_embed[i, cap_len:cap_len+img_len] = padded_img_embed[i, :img_len]
return padded_full_embed, mask, img_sizes, l_effective_cap_len, freqs_cis
# def forward(self, x, t, cap_feats, cap_mask):
def forward(self, x, timesteps, context, num_tokens, attention_mask=None, **kwargs):
t = 1.0 - timesteps
cap_feats = context
cap_mask = attention_mask
bs, c, h, w = x.shape
x = comfy.ldm.common_dit.pad_to_patch_size(x, (self.patch_size, self.patch_size))
"""
Forward pass of NextDiT.
t: (N,) tensor of diffusion timesteps
y: (N,) tensor of text tokens/features
"""
t = self.t_embedder(t, dtype=x.dtype) # (N, D)
adaln_input = t
cap_feats = self.cap_embedder(cap_feats) # (N, L, D) # todo check if able to batchify w.o. redundant compute
x_is_tensor = isinstance(x, torch.Tensor)
x, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, t, num_tokens)
freqs_cis = freqs_cis.to(x.device)
for layer in self.layers:
x = layer(x, mask, freqs_cis, adaln_input)
x = self.final_layer(x, adaln_input)
x = self.unpatchify(x, img_size, cap_size, return_tensor=x_is_tensor)[:,:,:h,:w]
return -x

View File

@ -1,4 +1,6 @@
 import math
+import sys

 import torch
 import torch.nn.functional as F
 from torch import nn, einsum
@ -16,7 +18,11 @@ if model_management.xformers_enabled():
     import xformers.ops

 if model_management.sage_attention_enabled():
-    from sageattention import sageattn
+    try:
+        from sageattention import sageattn
+    except ModuleNotFoundError:
+        logging.error(f"\n\nTo use the `--use-sage-attention` feature, the `sageattention` package must be installed first.\ncommand:\n\t{sys.executable} -m pip install sageattention")
+        exit(-1)

 from comfy.cli_args import args
 import comfy.ops

View File

@ -321,7 +321,7 @@ class SelfAttention(nn.Module):
 class RMSNorm(torch.nn.Module):
     def __init__(
-        self, dim: int, elementwise_affine: bool = False, eps: float = 1e-6, device=None, dtype=None
+        self, dim: int, elementwise_affine: bool = False, eps: float = 1e-6, device=None, dtype=None, **kwargs
     ):
         """
         Initialize the RMSNorm normalization layer.

View File

@ -702,9 +702,6 @@ class Decoder(nn.Module):
                         padding=1)

     def forward(self, z, **kwargs):
-        #assert z.shape[1:] == self.z_shape[1:]
-        self.last_z_shape = z.shape
-
         # timestep embedding
         temb = None

View File

@ -307,7 +307,6 @@ def model_lora_keys_unet(model, key_map={}):
             if k.endswith(".weight"):
                 key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
                 key_map["lora_unet_{}".format(key_lora)] = k
-                key_map["lora_prior_unet_{}".format(key_lora)] = k #cascade lora: TODO put lora key prefix in the model config
                 key_map["{}".format(k[:-len(".weight")])] = k #generic lora format without any weird key names
             else:
                 key_map["{}".format(k)] = k #generic lora format for not .weight without any weird key names
@ -327,6 +326,13 @@ def model_lora_keys_unet(model, key_map={}):
                 diffusers_lora_key = diffusers_lora_key[:-2]
             key_map[diffusers_lora_key] = unet_key

+    if isinstance(model, comfy.model_base.StableCascade_C):
+        for k in sdk:
+            if k.startswith("diffusion_model."):
+                if k.endswith(".weight"):
+                    key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
+                    key_map["lora_prior_unet_{}".format(key_lora)] = k
+
     if isinstance(model, comfy.model_base.SD3): #Diffusers lora SD3
         diffusers_keys = comfy.utils.mmdit_to_diffusers(model.model_config.unet_config, output_prefix="diffusion_model.")
         for k in diffusers_keys:

View File

@ -34,6 +34,7 @@ import comfy.ldm.flux.model
import comfy.ldm.lightricks.model import comfy.ldm.lightricks.model
import comfy.ldm.hunyuan_video.model import comfy.ldm.hunyuan_video.model
import comfy.ldm.cosmos.model import comfy.ldm.cosmos.model
import comfy.ldm.lumina.model
import comfy.model_management import comfy.model_management
import comfy.patcher_extension import comfy.patcher_extension
@ -148,7 +149,9 @@ class BaseModel(torch.nn.Module):
xc = xc.to(dtype) xc = xc.to(dtype)
t = self.model_sampling.timestep(t).float() t = self.model_sampling.timestep(t).float()
context = context.to(dtype) if context is not None:
context = context.to(dtype)
extra_conds = {} extra_conds = {}
for o in kwargs: for o in kwargs:
extra = kwargs[o] extra = kwargs[o]
@ -163,9 +166,6 @@ class BaseModel(torch.nn.Module):
def get_dtype(self): def get_dtype(self):
return self.diffusion_model.dtype return self.diffusion_model.dtype
def is_adm(self):
return self.adm_channels > 0
def encode_adm(self, **kwargs): def encode_adm(self, **kwargs):
return None return None
@ -549,6 +549,10 @@ class SD_X4Upscaler(BaseModel):
out['c_concat'] = comfy.conds.CONDNoiseShape(image) out['c_concat'] = comfy.conds.CONDNoiseShape(image)
out['y'] = comfy.conds.CONDRegular(noise_level) out['y'] = comfy.conds.CONDRegular(noise_level)
cross_attn = kwargs.get("cross_attn", None)
if cross_attn is not None:
out['c_crossattn'] = comfy.conds.CONDCrossAttn(cross_attn)
return out return out
class IP2P: class IP2P:
@ -806,7 +810,10 @@ class Flux(BaseModel):
(h_tok, w_tok) = (math.ceil(shape[2] / self.diffusion_model.patch_size), math.ceil(shape[3] / self.diffusion_model.patch_size)) (h_tok, w_tok) = (math.ceil(shape[2] / self.diffusion_model.patch_size), math.ceil(shape[3] / self.diffusion_model.patch_size))
attention_mask = utils.upscale_dit_mask(attention_mask, mask_ref_size, (h_tok, w_tok)) attention_mask = utils.upscale_dit_mask(attention_mask, mask_ref_size, (h_tok, w_tok))
out['attention_mask'] = comfy.conds.CONDRegular(attention_mask) out['attention_mask'] = comfy.conds.CONDRegular(attention_mask)
out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([kwargs.get("guidance", 3.5)]))
guidance = kwargs.get("guidance", 3.5)
if guidance is not None:
out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance]))
return out return out
class GenmoMochi(BaseModel): class GenmoMochi(BaseModel):
@ -863,7 +870,10 @@ class HunyuanVideo(BaseModel):
cross_attn = kwargs.get("cross_attn", None) cross_attn = kwargs.get("cross_attn", None)
if cross_attn is not None: if cross_attn is not None:
out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn) out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([kwargs.get("guidance", 6.0)]))
guidance = kwargs.get("guidance", 6.0)
if guidance is not None:
out['guidance'] = comfy.conds.CONDRegular(torch.FloatTensor([guidance]))
return out return out
class CosmosVideo(BaseModel): class CosmosVideo(BaseModel):
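
Note: the two hunks above make the Flux and HunyuanVideo guidance embeds optional: when the conditioning carries guidance=None, no guidance cond is created instead of falling back to the default value. A minimal sketch of that behaviour, using a plain dict in place of comfy.conds.CONDRegular:

def build_guidance_cond(kwargs, default=3.5):
    out = {}
    guidance = kwargs.get("guidance", default)
    if guidance is not None:
        out["guidance"] = [float(guidance)]  # stand-in for comfy.conds.CONDRegular(torch.FloatTensor([guidance]))
    return out

print(build_guidance_cond({}))                  # default embed -> {'guidance': [3.5]}
print(build_guidance_cond({"guidance": None}))  # guidance disabled -> {}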
@ -892,3 +902,19 @@ class CosmosVideo(BaseModel):
latent_image = latent_image + noise latent_image = latent_image + noise
latent_image = self.model_sampling.calculate_input(torch.tensor([sigma_noise_augmentation], device=latent_image.device, dtype=latent_image.dtype), latent_image) latent_image = self.model_sampling.calculate_input(torch.tensor([sigma_noise_augmentation], device=latent_image.device, dtype=latent_image.dtype), latent_image)
return latent_image * ((sigma ** 2 + self.model_sampling.sigma_data ** 2) ** 0.5) return latent_image * ((sigma ** 2 + self.model_sampling.sigma_data ** 2) ** 0.5)
class Lumina2(BaseModel):
def __init__(self, model_config, model_type=ModelType.FLOW, device=None):
super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.lumina.model.NextDiT)
def extra_conds(self, **kwargs):
out = super().extra_conds(**kwargs)
attention_mask = kwargs.get("attention_mask", None)
if attention_mask is not None:
if torch.numel(attention_mask) != attention_mask.sum():
out['attention_mask'] = comfy.conds.CONDRegular(attention_mask)
out['num_tokens'] = comfy.conds.CONDConstant(max(1, torch.sum(attention_mask).item()))
cross_attn = kwargs.get("cross_attn", None)
if cross_attn is not None:
out['c_crossattn'] = comfy.conds.CONDRegular(cross_attn)
return out
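
Note: Lumina2.extra_conds only forwards the attention mask when it actually masks tokens (i.e. it is not all ones), and always reports the real token count as a constant. A small self-contained sketch of that logic:

import torch

def lumina2_mask_conds(attention_mask):
    out = {}
    if attention_mask is not None:
        if torch.numel(attention_mask) != attention_mask.sum():
            out["attention_mask"] = attention_mask  # stand-in for comfy.conds.CONDRegular
        out["num_tokens"] = max(1, int(attention_mask.sum().item()))
    return out

mask = torch.tensor([[1, 1, 1, 0, 0]])
print(lumina2_mask_conds(mask))  # mask kept, num_tokens == 3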

View File

@ -239,7 +239,7 @@ def detect_unet_config(state_dict, key_prefix):
dit_config["micro_condition"] = False dit_config["micro_condition"] = False
return dit_config return dit_config
if '{}blocks.block0.blocks.0.block.attn.to_q.0.weight'.format(key_prefix) in state_dict_keys: if '{}blocks.block0.blocks.0.block.attn.to_q.0.weight'.format(key_prefix) in state_dict_keys: # Cosmos
dit_config = {} dit_config = {}
dit_config["image_model"] = "cosmos" dit_config["image_model"] = "cosmos"
dit_config["max_img_h"] = 240 dit_config["max_img_h"] = 240
@ -284,6 +284,21 @@ def detect_unet_config(state_dict, key_prefix):
dit_config["extra_per_block_abs_pos_emb_type"] = "learnable" dit_config["extra_per_block_abs_pos_emb_type"] = "learnable"
return dit_config return dit_config
if '{}cap_embedder.1.weight'.format(key_prefix) in state_dict_keys: # Lumina 2
dit_config = {}
dit_config["image_model"] = "lumina2"
dit_config["patch_size"] = 2
dit_config["in_channels"] = 16
dit_config["dim"] = 2304
dit_config["cap_feat_dim"] = 2304
dit_config["n_layers"] = 26
dit_config["n_heads"] = 24
dit_config["n_kv_heads"] = 8
dit_config["qk_norm"] = True
dit_config["axes_dims"] = [32, 32, 32]
dit_config["axes_lens"] = [300, 512, 512]
return dit_config
if '{}input_blocks.0.0.weight'.format(key_prefix) not in state_dict_keys: if '{}input_blocks.0.0.weight'.format(key_prefix) not in state_dict_keys:
return None return None
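
Note: the detection hunk above recognizes Lumina 2 checkpoints purely by probing for the cap_embedder key under the unet prefix. A minimal sketch, assuming the usual "model.diffusion_model." prefix for full checkpoints:

def is_lumina2(state_dict_keys, key_prefix="model.diffusion_model."):
    # the key name comes from the diff; the prefix is an assumption for illustration
    return "{}cap_embedder.1.weight".format(key_prefix) in state_dict_keys

print(is_lumina2({"model.diffusion_model.cap_embedder.1.weight"}))  # True
print(is_lumina2({"model.diffusion_model.input_blocks.0.0.weight"}))  # False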

View File

@ -218,7 +218,7 @@ def is_amd():
MIN_WEIGHT_MEMORY_RATIO = 0.4 MIN_WEIGHT_MEMORY_RATIO = 0.4
if is_nvidia(): if is_nvidia():
MIN_WEIGHT_MEMORY_RATIO = 0.2 MIN_WEIGHT_MEMORY_RATIO = 0.1
ENABLE_PYTORCH_ATTENTION = False ENABLE_PYTORCH_ATTENTION = False
if args.use_pytorch_cross_attention: if args.use_pytorch_cross_attention:
@ -241,6 +241,12 @@ if ENABLE_PYTORCH_ATTENTION:
torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp(True)
try:
if is_nvidia() and args.fast:
torch.backends.cuda.matmul.allow_fp16_accumulation = True
except:
pass
try: try:
if int(torch_version[0]) == 2 and int(torch_version[2]) >= 5: if int(torch_version[0]) == 2 and int(torch_version[2]) >= 5:
torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True) torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True)
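
Note: the new fp16-accumulation toggle is wrapped in try/except because torch.backends.cuda.matmul.allow_fp16_accumulation only exists in newer PyTorch builds, and older builds raise when an unknown backend flag is assigned. A minimal standalone sketch of the same guard:

import torch

try:
    # speeds up fp16 matmuls by accumulating in fp16 (lower precision accumulation)
    torch.backends.cuda.matmul.allow_fp16_accumulation = True
    print("fp16 accumulation enabled")
except (AttributeError, AssertionError, RuntimeError):
    print("this PyTorch build does not expose allow_fp16_accumulation")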
@ -256,15 +262,10 @@ elif args.highvram or args.gpu_only:
vram_state = VRAMState.HIGH_VRAM vram_state = VRAMState.HIGH_VRAM
FORCE_FP32 = False FORCE_FP32 = False
FORCE_FP16 = False
if args.force_fp32: if args.force_fp32:
logging.info("Forcing FP32, if this improves things please report it.") logging.info("Forcing FP32, if this improves things please report it.")
FORCE_FP32 = True FORCE_FP32 = True
if args.force_fp16:
logging.info("Forcing FP16.")
FORCE_FP16 = True
if lowvram_available: if lowvram_available:
if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM): if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
vram_state = set_vram_to vram_state = set_vram_to
@ -535,14 +536,11 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
vram_set_state = vram_state vram_set_state = vram_state
lowvram_model_memory = 0 lowvram_model_memory = 0
if lowvram_available and (vram_set_state == VRAMState.LOW_VRAM or vram_set_state == VRAMState.NORMAL_VRAM) and not force_full_load: if lowvram_available and (vram_set_state == VRAMState.LOW_VRAM or vram_set_state == VRAMState.NORMAL_VRAM) and not force_full_load:
model_size = loaded_model.model_memory_required(torch_dev)
loaded_memory = loaded_model.model_loaded_memory() loaded_memory = loaded_model.model_loaded_memory()
current_free_mem = get_free_memory(torch_dev) + loaded_memory current_free_mem = get_free_memory(torch_dev) + loaded_memory
lowvram_model_memory = max(64 * 1024 * 1024, (current_free_mem - minimum_memory_required), min(current_free_mem * MIN_WEIGHT_MEMORY_RATIO, current_free_mem - minimum_inference_memory())) lowvram_model_memory = max(64 * 1024 * 1024, (current_free_mem - minimum_memory_required), min(current_free_mem * MIN_WEIGHT_MEMORY_RATIO, current_free_mem - minimum_inference_memory()))
lowvram_model_memory = max(0.1, lowvram_model_memory - loaded_memory) lowvram_model_memory = max(0.1, lowvram_model_memory - loaded_memory)
if model_size <= lowvram_model_memory: #only switch to lowvram if really necessary
lowvram_model_memory = 0
if vram_set_state == VRAMState.NO_VRAM: if vram_set_state == VRAMState.NO_VRAM:
lowvram_model_memory = 0.1 lowvram_model_memory = 0.1
@ -998,6 +996,13 @@ def is_device_mps(device):
def is_device_cuda(device): def is_device_cuda(device):
return is_device_type(device, 'cuda') return is_device_type(device, 'cuda')
def is_directml_enabled():
global directml_enabled
if directml_enabled:
return True
return False
def should_use_fp16(device=None, model_params=0, prioritize_performance=True, manual_cast=False): def should_use_fp16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
global directml_enabled global directml_enabled
@ -1005,7 +1010,7 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True, ma
if is_device_cpu(device): if is_device_cpu(device):
return False return False
if FORCE_FP16: if args.force_fp16:
return True return True
if FORCE_FP32: if FORCE_FP32:
@ -1083,6 +1088,9 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma
if is_intel_xpu(): if is_intel_xpu():
return True return True
if is_ascend_npu():
return True
props = torch.cuda.get_device_properties(device) props = torch.cuda.get_device_properties(device)
if props.major >= 8: if props.major >= 8:
return True return True

View File

@ -96,8 +96,28 @@ def wipe_lowvram_weight(m):
if hasattr(m, "prev_comfy_cast_weights"): if hasattr(m, "prev_comfy_cast_weights"):
m.comfy_cast_weights = m.prev_comfy_cast_weights m.comfy_cast_weights = m.prev_comfy_cast_weights
del m.prev_comfy_cast_weights del m.prev_comfy_cast_weights
m.weight_function = None
m.bias_function = None if hasattr(m, "weight_function"):
m.weight_function = []
if hasattr(m, "bias_function"):
m.bias_function = []
def move_weight_functions(m, device):
if device is None:
return 0
memory = 0
if hasattr(m, "weight_function"):
for f in m.weight_function:
if hasattr(f, "move_to"):
memory += f.move_to(device=device)
if hasattr(m, "bias_function"):
for f in m.bias_function:
if hasattr(f, "move_to"):
memory += f.move_to(device=device)
return memory
class LowVramPatch: class LowVramPatch:
def __init__(self, key, patches): def __init__(self, key, patches):
@ -192,6 +212,7 @@ class ModelPatcher:
self.backup = {} self.backup = {}
self.object_patches = {} self.object_patches = {}
self.object_patches_backup = {} self.object_patches_backup = {}
self.weight_wrapper_patches = {}
self.model_options = {"transformer_options":{}} self.model_options = {"transformer_options":{}}
self.model_size() self.model_size()
self.load_device = load_device self.load_device = load_device
@ -250,6 +271,7 @@ class ModelPatcher:
n.patches_uuid = self.patches_uuid n.patches_uuid = self.patches_uuid
n.object_patches = self.object_patches.copy() n.object_patches = self.object_patches.copy()
n.weight_wrapper_patches = self.weight_wrapper_patches.copy()
n.model_options = copy.deepcopy(self.model_options) n.model_options = copy.deepcopy(self.model_options)
n.backup = self.backup n.backup = self.backup
n.object_patches_backup = self.object_patches_backup n.object_patches_backup = self.object_patches_backup
@ -402,6 +424,10 @@ class ModelPatcher:
def add_object_patch(self, name, obj): def add_object_patch(self, name, obj):
self.object_patches[name] = obj self.object_patches[name] = obj
def add_weight_wrapper(self, name, function):
self.weight_wrapper_patches[name] = self.weight_wrapper_patches.get(name, []) + [function]
self.patches_uuid = uuid.uuid4()
def get_model_object(self, name: str) -> torch.nn.Module: def get_model_object(self, name: str) -> torch.nn.Module:
"""Retrieves a nested attribute from an object using dot notation considering """Retrieves a nested attribute from an object using dot notation considering
object patches. object patches.
@ -566,6 +592,9 @@ class ModelPatcher:
lowvram_weight = False lowvram_weight = False
weight_key = "{}.weight".format(n)
bias_key = "{}.bias".format(n)
if not full_load and hasattr(m, "comfy_cast_weights"): if not full_load and hasattr(m, "comfy_cast_weights"):
if mem_counter + module_mem >= lowvram_model_memory: if mem_counter + module_mem >= lowvram_model_memory:
lowvram_weight = True lowvram_weight = True
@ -573,34 +602,42 @@ class ModelPatcher:
if hasattr(m, "prev_comfy_cast_weights"): #Already lowvramed if hasattr(m, "prev_comfy_cast_weights"): #Already lowvramed
continue continue
weight_key = "{}.weight".format(n)
bias_key = "{}.bias".format(n)
if lowvram_weight: if lowvram_weight:
if hasattr(m, "comfy_cast_weights"):
m.weight_function = []
m.bias_function = []
if weight_key in self.patches: if weight_key in self.patches:
if force_patch_weights: if force_patch_weights:
self.patch_weight_to_device(weight_key) self.patch_weight_to_device(weight_key)
else: else:
m.weight_function = LowVramPatch(weight_key, self.patches) m.weight_function = [LowVramPatch(weight_key, self.patches)]
patch_counter += 1 patch_counter += 1
if bias_key in self.patches: if bias_key in self.patches:
if force_patch_weights: if force_patch_weights:
self.patch_weight_to_device(bias_key) self.patch_weight_to_device(bias_key)
else: else:
m.bias_function = LowVramPatch(bias_key, self.patches) m.bias_function = [LowVramPatch(bias_key, self.patches)]
patch_counter += 1 patch_counter += 1
m.prev_comfy_cast_weights = m.comfy_cast_weights m.prev_comfy_cast_weights = m.comfy_cast_weights
m.comfy_cast_weights = True m.comfy_cast_weights = True
else: else:
if hasattr(m, "comfy_cast_weights"): if hasattr(m, "comfy_cast_weights"):
if m.comfy_cast_weights: wipe_lowvram_weight(m)
wipe_lowvram_weight(m)
if full_load or mem_counter + module_mem < lowvram_model_memory: if full_load or mem_counter + module_mem < lowvram_model_memory:
mem_counter += module_mem mem_counter += module_mem
load_completely.append((module_mem, n, m, params)) load_completely.append((module_mem, n, m, params))
if weight_key in self.weight_wrapper_patches:
m.weight_function.extend(self.weight_wrapper_patches[weight_key])
if bias_key in self.weight_wrapper_patches:
m.bias_function.extend(self.weight_wrapper_patches[bias_key])
mem_counter += move_weight_functions(m, device_to)
load_completely.sort(reverse=True) load_completely.sort(reverse=True)
for x in load_completely: for x in load_completely:
n = x[1] n = x[1]
@ -662,6 +699,7 @@ class ModelPatcher:
self.unpatch_hooks() self.unpatch_hooks()
if self.model.model_lowvram: if self.model.model_lowvram:
for m in self.model.modules(): for m in self.model.modules():
move_weight_functions(m, device_to)
wipe_lowvram_weight(m) wipe_lowvram_weight(m)
self.model.model_lowvram = False self.model.model_lowvram = False
@ -729,12 +767,13 @@ class ModelPatcher:
bias_key = "{}.bias".format(n) bias_key = "{}.bias".format(n)
if move_weight: if move_weight:
m.to(device_to) m.to(device_to)
module_mem += move_weight_functions(m, device_to)
if lowvram_possible: if lowvram_possible:
if weight_key in self.patches: if weight_key in self.patches:
m.weight_function = LowVramPatch(weight_key, self.patches) m.weight_function.append(LowVramPatch(weight_key, self.patches))
patch_counter += 1 patch_counter += 1
if bias_key in self.patches: if bias_key in self.patches:
m.bias_function = LowVramPatch(bias_key, self.patches) m.bias_function.append(LowVramPatch(bias_key, self.patches))
patch_counter += 1 patch_counter += 1
m.prev_comfy_cast_weights = m.comfy_cast_weights m.prev_comfy_cast_weights = m.comfy_cast_weights
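
Note: weight_function and bias_function are now lists, so several wrappers (LowVramPatch plus anything registered through add_weight_wrapper) can be attached to one parameter and applied in order after the weight is cast, as the ops.py hunk below shows. A tiny sketch of that composition with illustrative wrappers:

import torch

weight_function = []                        # per-module list, as set up by the patcher
weight_function.append(lambda w: w * 0.5)   # e.g. a scaling wrapper added via add_weight_wrapper
weight_function.append(lambda w: w + 1.0)   # wrappers compose in registration order

weight = torch.ones(2, 2)
for f in weight_function:
    weight = f(weight)
print(weight)  # tensor of 1.5s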

View File

@ -31,6 +31,7 @@ class EPS:
return model_input - model_output * sigma return model_input - model_output * sigma
def noise_scaling(self, sigma, noise, latent_image, max_denoise=False): def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
if max_denoise: if max_denoise:
noise = noise * torch.sqrt(1.0 + sigma ** 2.0) noise = noise * torch.sqrt(1.0 + sigma ** 2.0)
else: else:
@ -61,9 +62,11 @@ class CONST:
return model_input - model_output * sigma return model_input - model_output * sigma
def noise_scaling(self, sigma, noise, latent_image, max_denoise=False): def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
return sigma * noise + (1.0 - sigma) * latent_image return sigma * noise + (1.0 - sigma) * latent_image
def inverse_noise_scaling(self, sigma, latent): def inverse_noise_scaling(self, sigma, latent):
sigma = sigma.view(sigma.shape[:1] + (1,) * (latent.ndim - 1))
return latent / (1.0 - sigma) return latent / (1.0 - sigma)
class ModelSamplingDiscrete(torch.nn.Module): class ModelSamplingDiscrete(torch.nn.Module):
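
Note: noise_scaling and inverse_noise_scaling now reshape a per-batch sigma of shape [B] to [B, 1, 1, ...] so it broadcasts against noise and latents of any rank (images or video). A minimal sketch with illustrative shapes:

import torch

sigma = torch.tensor([0.25, 0.75])           # one sigma per batch element
noise = torch.randn(2, 4, 8, 8)
latent = torch.zeros(2, 4, 8, 8)

sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))  # -> shape [2, 1, 1, 1]
scaled = sigma * noise + (1.0 - sigma) * latent                # CONST (flow) noise scaling
print(scaled.shape)  # torch.Size([2, 4, 8, 8])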

View File

@ -38,21 +38,23 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None):
bias = None bias = None
non_blocking = comfy.model_management.device_supports_non_blocking(device) non_blocking = comfy.model_management.device_supports_non_blocking(device)
if s.bias is not None: if s.bias is not None:
has_function = s.bias_function is not None has_function = len(s.bias_function) > 0
bias = comfy.model_management.cast_to(s.bias, bias_dtype, device, non_blocking=non_blocking, copy=has_function) bias = comfy.model_management.cast_to(s.bias, bias_dtype, device, non_blocking=non_blocking, copy=has_function)
if has_function: if has_function:
bias = s.bias_function(bias) for f in s.bias_function:
bias = f(bias)
has_function = s.weight_function is not None has_function = len(s.weight_function) > 0
weight = comfy.model_management.cast_to(s.weight, dtype, device, non_blocking=non_blocking, copy=has_function) weight = comfy.model_management.cast_to(s.weight, dtype, device, non_blocking=non_blocking, copy=has_function)
if has_function: if has_function:
weight = s.weight_function(weight) for f in s.weight_function:
weight = f(weight)
return weight, bias return weight, bias
class CastWeightBiasOp: class CastWeightBiasOp:
comfy_cast_weights = False comfy_cast_weights = False
weight_function = None weight_function = []
bias_function = None bias_function = []
class disable_weight_init: class disable_weight_init:
class Linear(torch.nn.Linear, CastWeightBiasOp): class Linear(torch.nn.Linear, CastWeightBiasOp):
@ -64,7 +66,7 @@ class disable_weight_init:
return torch.nn.functional.linear(input, weight, bias) return torch.nn.functional.linear(input, weight, bias)
def forward(self, *args, **kwargs): def forward(self, *args, **kwargs):
if self.comfy_cast_weights: if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs) return self.forward_comfy_cast_weights(*args, **kwargs)
else: else:
return super().forward(*args, **kwargs) return super().forward(*args, **kwargs)
@ -78,7 +80,7 @@ class disable_weight_init:
return self._conv_forward(input, weight, bias) return self._conv_forward(input, weight, bias)
def forward(self, *args, **kwargs): def forward(self, *args, **kwargs):
if self.comfy_cast_weights: if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs) return self.forward_comfy_cast_weights(*args, **kwargs)
else: else:
return super().forward(*args, **kwargs) return super().forward(*args, **kwargs)
@ -92,7 +94,7 @@ class disable_weight_init:
return self._conv_forward(input, weight, bias) return self._conv_forward(input, weight, bias)
def forward(self, *args, **kwargs): def forward(self, *args, **kwargs):
if self.comfy_cast_weights: if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs) return self.forward_comfy_cast_weights(*args, **kwargs)
else: else:
return super().forward(*args, **kwargs) return super().forward(*args, **kwargs)
@ -106,7 +108,7 @@ class disable_weight_init:
return self._conv_forward(input, weight, bias) return self._conv_forward(input, weight, bias)
def forward(self, *args, **kwargs): def forward(self, *args, **kwargs):
if self.comfy_cast_weights: if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs) return self.forward_comfy_cast_weights(*args, **kwargs)
else: else:
return super().forward(*args, **kwargs) return super().forward(*args, **kwargs)
@ -120,12 +122,11 @@ class disable_weight_init:
return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps) return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps)
def forward(self, *args, **kwargs): def forward(self, *args, **kwargs):
if self.comfy_cast_weights: if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs) return self.forward_comfy_cast_weights(*args, **kwargs)
else: else:
return super().forward(*args, **kwargs) return super().forward(*args, **kwargs)
class LayerNorm(torch.nn.LayerNorm, CastWeightBiasOp): class LayerNorm(torch.nn.LayerNorm, CastWeightBiasOp):
def reset_parameters(self): def reset_parameters(self):
return None return None
@ -139,7 +140,7 @@ class disable_weight_init:
return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps) return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps)
def forward(self, *args, **kwargs): def forward(self, *args, **kwargs):
if self.comfy_cast_weights: if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs) return self.forward_comfy_cast_weights(*args, **kwargs)
else: else:
return super().forward(*args, **kwargs) return super().forward(*args, **kwargs)
@ -160,7 +161,7 @@ class disable_weight_init:
output_padding, self.groups, self.dilation) output_padding, self.groups, self.dilation)
def forward(self, *args, **kwargs): def forward(self, *args, **kwargs):
if self.comfy_cast_weights: if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs) return self.forward_comfy_cast_weights(*args, **kwargs)
else: else:
return super().forward(*args, **kwargs) return super().forward(*args, **kwargs)
@ -181,7 +182,7 @@ class disable_weight_init:
output_padding, self.groups, self.dilation) output_padding, self.groups, self.dilation)
def forward(self, *args, **kwargs): def forward(self, *args, **kwargs):
if self.comfy_cast_weights: if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs) return self.forward_comfy_cast_weights(*args, **kwargs)
else: else:
return super().forward(*args, **kwargs) return super().forward(*args, **kwargs)
@ -199,7 +200,7 @@ class disable_weight_init:
return torch.nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse).to(dtype=output_dtype) return torch.nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse).to(dtype=output_dtype)
def forward(self, *args, **kwargs): def forward(self, *args, **kwargs):
if self.comfy_cast_weights: if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs) return self.forward_comfy_cast_weights(*args, **kwargs)
else: else:
if "out_dtype" in kwargs: if "out_dtype" in kwargs:

View File

@ -58,7 +58,6 @@ def convert_cond(cond):
temp = c[1].copy() temp = c[1].copy()
model_conds = temp.get("model_conds", {}) model_conds = temp.get("model_conds", {})
if c[0] is not None: if c[0] is not None:
model_conds["c_crossattn"] = comfy.conds.CONDCrossAttn(c[0]) #TODO: remove
temp["cross_attn"] = c[0] temp["cross_attn"] = c[0]
temp["model_conds"] = model_conds temp["model_conds"] = model_conds
temp["uuid"] = uuid.uuid4() temp["uuid"] = uuid.uuid4()

View File

@ -12,7 +12,6 @@ import collections
from comfy import model_management from comfy import model_management
import math import math
import logging import logging
import comfy.samplers
import comfy.sampler_helpers import comfy.sampler_helpers
import comfy.model_patcher import comfy.model_patcher
import comfy.patcher_extension import comfy.patcher_extension
@ -178,7 +177,7 @@ def finalize_default_conds(model: 'BaseModel', hooked_to_run: dict[comfy.hooks.H
cond = default_conds[i] cond = default_conds[i]
for x in cond: for x in cond:
# do get_area_and_mult to get all the expected values # do get_area_and_mult to get all the expected values
p = comfy.samplers.get_area_and_mult(x, x_in, timestep) p = get_area_and_mult(x, x_in, timestep)
if p is None: if p is None:
continue continue
# replace p's mult with calculated mult # replace p's mult with calculated mult
@ -215,7 +214,7 @@ def _calc_cond_batch(model: 'BaseModel', conds: list[list[dict]], x_in: torch.Te
default_c.append(x) default_c.append(x)
has_default_conds = True has_default_conds = True
continue continue
p = comfy.samplers.get_area_and_mult(x, x_in, timestep) p = get_area_and_mult(x, x_in, timestep)
if p is None: if p is None:
continue continue
if p.hooks is not None: if p.hooks is not None:
@ -687,7 +686,8 @@ class Sampler:
KSAMPLER_NAMES = ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2","dpm_2", "dpm_2_ancestral", KSAMPLER_NAMES = ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2","dpm_2", "dpm_2_ancestral",
"lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu",
"dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm",
"ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp"] "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp",
"gradient_estimation"]
class KSAMPLER(Sampler): class KSAMPLER(Sampler):
def __init__(self, sampler_function, extra_options={}, inpaint_options={}): def __init__(self, sampler_function, extra_options={}, inpaint_options={}):
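
Note: the three sampler names appended above become selectable wherever KSAMPLER_NAMES is exposed (for example the KSampler node's sampler dropdown). A quick check, assuming a ComfyUI checkout is importable:

import comfy.samplers

for name in ("res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation"):
    print(name, name in comfy.samplers.KSAMPLER_NAMES)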

View File

@ -36,6 +36,7 @@ import comfy.text_encoders.genmo
import comfy.text_encoders.lt import comfy.text_encoders.lt
import comfy.text_encoders.hunyuan_video import comfy.text_encoders.hunyuan_video
import comfy.text_encoders.cosmos import comfy.text_encoders.cosmos
import comfy.text_encoders.lumina2
import comfy.model_patcher import comfy.model_patcher
import comfy.lora import comfy.lora
@ -657,6 +658,7 @@ class CLIPType(Enum):
HUNYUAN_VIDEO = 9 HUNYUAN_VIDEO = 9
PIXART = 10 PIXART = 10
COSMOS = 11 COSMOS = 11
LUMINA2 = 12
def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}): def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}):
@ -675,6 +677,7 @@ class TEModel(Enum):
T5_BASE = 6 T5_BASE = 6
LLAMA3_8 = 7 LLAMA3_8 = 7
T5_XXL_OLD = 8 T5_XXL_OLD = 8
GEMMA_2_2B = 9
def detect_te_model(sd): def detect_te_model(sd):
if "text_model.encoder.layers.30.mlp.fc1.weight" in sd: if "text_model.encoder.layers.30.mlp.fc1.weight" in sd:
@ -693,6 +696,8 @@ def detect_te_model(sd):
return TEModel.T5_XXL_OLD return TEModel.T5_XXL_OLD
if "encoder.block.0.layer.0.SelfAttention.k.weight" in sd: if "encoder.block.0.layer.0.SelfAttention.k.weight" in sd:
return TEModel.T5_BASE return TEModel.T5_BASE
if 'model.layers.0.post_feedforward_layernorm.weight' in sd:
return TEModel.GEMMA_2_2B
if "model.layers.0.post_attention_layernorm.weight" in sd: if "model.layers.0.post_attention_layernorm.weight" in sd:
return TEModel.LLAMA3_8 return TEModel.LLAMA3_8
return None return None
@ -730,6 +735,7 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
if "text_projection" in clip_data[i]: if "text_projection" in clip_data[i]:
clip_data[i]["text_projection.weight"] = clip_data[i]["text_projection"].transpose(0, 1) #old models saved with the CLIPSave node clip_data[i]["text_projection.weight"] = clip_data[i]["text_projection"].transpose(0, 1) #old models saved with the CLIPSave node
tokenizer_data = {}
clip_target = EmptyClass() clip_target = EmptyClass()
clip_target.params = {} clip_target.params = {}
if len(clip_data) == 1: if len(clip_data) == 1:
@ -769,6 +775,10 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
elif te_model == TEModel.T5_BASE: elif te_model == TEModel.T5_BASE:
clip_target.clip = comfy.text_encoders.sa_t5.SAT5Model clip_target.clip = comfy.text_encoders.sa_t5.SAT5Model
clip_target.tokenizer = comfy.text_encoders.sa_t5.SAT5Tokenizer clip_target.tokenizer = comfy.text_encoders.sa_t5.SAT5Tokenizer
elif te_model == TEModel.GEMMA_2_2B:
clip_target.clip = comfy.text_encoders.lumina2.te(**llama_detect(clip_data))
clip_target.tokenizer = comfy.text_encoders.lumina2.LuminaTokenizer
tokenizer_data["spiece_model"] = clip_data[0].get("spiece_model", None)
else: else:
if clip_type == CLIPType.SD3: if clip_type == CLIPType.SD3:
clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(clip_l=True, clip_g=False, t5=False) clip_target.clip = comfy.text_encoders.sd3_clip.sd3_clip(clip_l=True, clip_g=False, t5=False)
@ -798,7 +808,6 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer
parameters = 0 parameters = 0
tokenizer_data = {}
for c in clip_data: for c in clip_data:
parameters += comfy.utils.calculate_parameters(c) parameters += comfy.utils.calculate_parameters(c)
tokenizer_data, model_options = comfy.text_encoders.long_clipl.model_options_long_clip(c, tokenizer_data, model_options) tokenizer_data, model_options = comfy.text_encoders.long_clipl.model_options_long_clip(c, tokenizer_data, model_options)
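
Note: Gemma 2 2B text encoders are detected by the post_feedforward_layernorm key before the generic Llama check runs (Llama 3 has post_attention_layernorm but no post_feedforward_layernorm). A minimal sketch of that rule with illustrative key sets:

def looks_like_gemma2_2b(sd_keys):
    return "model.layers.0.post_feedforward_layernorm.weight" in sd_keys

print(looks_like_gemma2_2b({"model.layers.0.post_feedforward_layernorm.weight"}))  # True
print(looks_like_gemma2_2b({"model.layers.0.post_attention_layernorm.weight"}))    # False -> falls through to LLAMA3_8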

View File

@ -421,10 +421,10 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
return embed_out return embed_out
class SDTokenizer: class SDTokenizer:
def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, tokenizer_data={}): def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, tokenizer_data={}, tokenizer_args={}):
if tokenizer_path is None: if tokenizer_path is None:
tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer") tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path) self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args)
self.max_length = max_length self.max_length = max_length
self.min_length = min_length self.min_length = min_length
self.end_token = None self.end_token = None
@ -585,9 +585,14 @@ class SDTokenizer:
return {} return {}
class SD1Tokenizer: class SD1Tokenizer:
def __init__(self, embedding_directory=None, tokenizer_data={}, clip_name="l", tokenizer=SDTokenizer): def __init__(self, embedding_directory=None, tokenizer_data={}, clip_name="l", tokenizer=SDTokenizer, name=None):
self.clip_name = clip_name if name is not None:
self.clip = "clip_{}".format(self.clip_name) self.clip_name = name
self.clip = "{}".format(self.clip_name)
else:
self.clip_name = clip_name
self.clip = "clip_{}".format(self.clip_name)
tokenizer = tokenizer_data.get("{}_tokenizer_class".format(self.clip), tokenizer) tokenizer = tokenizer_data.get("{}_tokenizer_class".format(self.clip), tokenizer)
setattr(self, self.clip, tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data)) setattr(self, self.clip, tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data))
@ -600,7 +605,7 @@ class SD1Tokenizer:
return getattr(self, self.clip).untokenize(token_weight_pair) return getattr(self, self.clip).untokenize(token_weight_pair)
def state_dict(self): def state_dict(self):
return {} return getattr(self, self.clip).state_dict()
class SD1CheckpointClipModel(SDClipModel): class SD1CheckpointClipModel(SDClipModel):
def __init__(self, device="cpu", dtype=None, model_options={}): def __init__(self, device="cpu", dtype=None, model_options={}):

View File

@ -15,6 +15,7 @@ import comfy.text_encoders.genmo
import comfy.text_encoders.lt import comfy.text_encoders.lt
import comfy.text_encoders.hunyuan_video import comfy.text_encoders.hunyuan_video
import comfy.text_encoders.cosmos import comfy.text_encoders.cosmos
import comfy.text_encoders.lumina2
from . import supported_models_base from . import supported_models_base
from . import latent_formats from . import latent_formats
@ -865,6 +866,35 @@ class CosmosI2V(CosmosT2V):
out = model_base.CosmosVideo(self, image_to_video=True, device=device) out = model_base.CosmosVideo(self, image_to_video=True, device=device)
return out return out
models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo, CosmosT2V, CosmosI2V] class Lumina2(supported_models_base.BASE):
unet_config = {
"image_model": "lumina2",
}
sampling_settings = {
"multiplier": 1.0,
"shift": 6.0,
}
memory_usage_factor = 1.2
unet_extra_config = {}
latent_format = latent_formats.Flux
supported_inference_dtypes = [torch.bfloat16, torch.float32]
vae_key_prefix = ["vae."]
text_encoder_key_prefix = ["text_encoders."]
def get_model(self, state_dict, prefix="", device=None):
out = model_base.Lumina2(self, device=device)
return out
def clip_target(self, state_dict={}):
pref = self.text_encoder_key_prefix[0]
hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}gemma2_2b.transformer.".format(pref))
return supported_models_base.ClipTarget(comfy.text_encoders.lumina2.LuminaTokenizer, comfy.text_encoders.lumina2.te(**hunyuan_detect))
models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2]
models += [SVD_img2vid] models += [SVD_img2vid]

View File

@ -118,7 +118,7 @@ class BertModel_(torch.nn.Module):
mask = None mask = None
if attention_mask is not None: if attention_mask is not None:
mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]) mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
mask = mask.masked_fill(mask.to(torch.bool), float("-inf")) mask = mask.masked_fill(mask.to(torch.bool), -torch.finfo(x.dtype).max)
x, i = self.encoder(x, mask, intermediate_output) x, i = self.encoder(x, mask, intermediate_output)
return x, i return x, i
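
Note: replacing float("-inf") with the dtype's most negative finite value avoids NaNs when an attention row is fully masked: softmax over an all -inf row produces NaN, while a large finite negative bias degrades gracefully to a uniform distribution. A small demonstration:

import torch

scores = torch.zeros(1, 4)
inf_mask = torch.full_like(scores, float("-inf"))
fin_mask = torch.full_like(scores, -torch.finfo(scores.dtype).max)

print(torch.softmax(scores + inf_mask, dim=-1))  # tensor([[nan, nan, nan, nan]])
print(torch.softmax(scores + fin_mask, dim=-1))  # tensor([[0.25, 0.25, 0.25, 0.25]])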

View File

@ -1,6 +1,5 @@
import torch import torch
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass from dataclasses import dataclass
from typing import Optional, Any from typing import Optional, Any
@ -21,15 +20,41 @@ class Llama2Config:
max_position_embeddings: int = 8192 max_position_embeddings: int = 8192
rms_norm_eps: float = 1e-5 rms_norm_eps: float = 1e-5
rope_theta: float = 500000.0 rope_theta: float = 500000.0
transformer_type: str = "llama"
head_dim = 128
rms_norm_add = False
mlp_activation = "silu"
@dataclass
class Gemma2_2B_Config:
vocab_size: int = 256000
hidden_size: int = 2304
intermediate_size: int = 9216
num_hidden_layers: int = 26
num_attention_heads: int = 8
num_key_value_heads: int = 4
max_position_embeddings: int = 8192
rms_norm_eps: float = 1e-6
rope_theta: float = 10000.0
transformer_type: str = "gemma2"
head_dim = 256
rms_norm_add = True
mlp_activation = "gelu_pytorch_tanh"
class RMSNorm(nn.Module): class RMSNorm(nn.Module):
def __init__(self, dim: int, eps: float = 1e-5, device=None, dtype=None): def __init__(self, dim: int, eps: float = 1e-5, add=False, device=None, dtype=None):
super().__init__() super().__init__()
self.eps = eps self.eps = eps
self.weight = nn.Parameter(torch.empty(dim, device=device, dtype=dtype)) self.weight = nn.Parameter(torch.empty(dim, device=device, dtype=dtype))
self.add = add
def forward(self, x: torch.Tensor): def forward(self, x: torch.Tensor):
return comfy.ldm.common_dit.rms_norm(x, self.weight, self.eps) w = self.weight
if self.add:
w = w + 1.0
return comfy.ldm.common_dit.rms_norm(x, w, self.eps)
def rotate_half(x): def rotate_half(x):
@ -68,13 +93,15 @@ class Attention(nn.Module):
self.num_heads = config.num_attention_heads self.num_heads = config.num_attention_heads
self.num_kv_heads = config.num_key_value_heads self.num_kv_heads = config.num_key_value_heads
self.hidden_size = config.hidden_size self.hidden_size = config.hidden_size
self.head_dim = self.hidden_size // self.num_heads
self.head_dim = config.head_dim
self.inner_size = self.num_heads * self.head_dim
ops = ops or nn ops = ops or nn
self.q_proj = ops.Linear(config.hidden_size, config.hidden_size, bias=False, device=device, dtype=dtype) self.q_proj = ops.Linear(config.hidden_size, self.inner_size, bias=False, device=device, dtype=dtype)
self.k_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype) self.k_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype)
self.v_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype) self.v_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype)
self.o_proj = ops.Linear(config.hidden_size, config.hidden_size, bias=False, device=device, dtype=dtype) self.o_proj = ops.Linear(self.inner_size, config.hidden_size, bias=False, device=device, dtype=dtype)
def forward( def forward(
self, self,
@ -84,7 +111,6 @@ class Attention(nn.Module):
optimized_attention=None, optimized_attention=None,
): ):
batch_size, seq_length, _ = hidden_states.shape batch_size, seq_length, _ = hidden_states.shape
xq = self.q_proj(hidden_states) xq = self.q_proj(hidden_states)
xk = self.k_proj(hidden_states) xk = self.k_proj(hidden_states)
xv = self.v_proj(hidden_states) xv = self.v_proj(hidden_states)
@ -108,9 +134,13 @@ class MLP(nn.Module):
self.gate_proj = ops.Linear(config.hidden_size, config.intermediate_size, bias=False, device=device, dtype=dtype) self.gate_proj = ops.Linear(config.hidden_size, config.intermediate_size, bias=False, device=device, dtype=dtype)
self.up_proj = ops.Linear(config.hidden_size, config.intermediate_size, bias=False, device=device, dtype=dtype) self.up_proj = ops.Linear(config.hidden_size, config.intermediate_size, bias=False, device=device, dtype=dtype)
self.down_proj = ops.Linear(config.intermediate_size, config.hidden_size, bias=False, device=device, dtype=dtype) self.down_proj = ops.Linear(config.intermediate_size, config.hidden_size, bias=False, device=device, dtype=dtype)
if config.mlp_activation == "silu":
self.activation = torch.nn.functional.silu
elif config.mlp_activation == "gelu_pytorch_tanh":
self.activation = lambda a: torch.nn.functional.gelu(a, approximate="tanh")
def forward(self, x): def forward(self, x):
return self.down_proj(F.silu(self.gate_proj(x)) * self.up_proj(x)) return self.down_proj(self.activation(self.gate_proj(x)) * self.up_proj(x))
class TransformerBlock(nn.Module): class TransformerBlock(nn.Module):
def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None): def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None):
@ -146,6 +176,45 @@ class TransformerBlock(nn.Module):
return x return x
class TransformerBlockGemma2(nn.Module):
def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None):
super().__init__()
self.self_attn = Attention(config, device=device, dtype=dtype, ops=ops)
self.mlp = MLP(config, device=device, dtype=dtype, ops=ops)
self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
self.pre_feedforward_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
self.post_feedforward_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
def forward(
self,
x: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
freqs_cis: Optional[torch.Tensor] = None,
optimized_attention=None,
):
# Self Attention
residual = x
x = self.input_layernorm(x)
x = self.self_attn(
hidden_states=x,
attention_mask=attention_mask,
freqs_cis=freqs_cis,
optimized_attention=optimized_attention,
)
x = self.post_attention_layernorm(x)
x = residual + x
# MLP
residual = x
x = self.pre_feedforward_layernorm(x)
x = self.mlp(x)
x = self.post_feedforward_layernorm(x)
x = residual + x
return x
class Llama2_(nn.Module): class Llama2_(nn.Module):
def __init__(self, config, device=None, dtype=None, ops=None): def __init__(self, config, device=None, dtype=None, ops=None):
super().__init__() super().__init__()
@ -158,17 +227,27 @@ class Llama2_(nn.Module):
device=device, device=device,
dtype=dtype dtype=dtype
) )
if self.config.transformer_type == "gemma2":
transformer = TransformerBlockGemma2
self.normalize_in = True
else:
transformer = TransformerBlock
self.normalize_in = False
self.layers = nn.ModuleList([ self.layers = nn.ModuleList([
TransformerBlock(config, device=device, dtype=dtype, ops=ops) transformer(config, device=device, dtype=dtype, ops=ops)
for _ in range(config.num_hidden_layers) for _ in range(config.num_hidden_layers)
]) ])
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, device=device, dtype=dtype) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
# self.lm_head = ops.Linear(config.hidden_size, config.vocab_size, bias=False, device=device, dtype=dtype) # self.lm_head = ops.Linear(config.hidden_size, config.vocab_size, bias=False, device=device, dtype=dtype)
def forward(self, x, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None): def forward(self, x, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None):
x = self.embed_tokens(x, out_dtype=dtype) x = self.embed_tokens(x, out_dtype=dtype)
freqs_cis = precompute_freqs_cis(self.config.hidden_size // self.config.num_attention_heads, if self.normalize_in:
x *= self.config.hidden_size ** 0.5
freqs_cis = precompute_freqs_cis(self.config.head_dim,
x.shape[1], x.shape[1],
self.config.rope_theta, self.config.rope_theta,
device=x.device) device=x.device)
@ -206,16 +285,7 @@ class Llama2_(nn.Module):
return x, intermediate return x, intermediate
class BaseLlama:
class Llama2(torch.nn.Module):
def __init__(self, config_dict, dtype, device, operations):
super().__init__()
config = Llama2Config(**config_dict)
self.num_layers = config.num_hidden_layers
self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
self.dtype = dtype
def get_input_embeddings(self): def get_input_embeddings(self):
return self.model.embed_tokens return self.model.embed_tokens
@ -224,3 +294,23 @@ class Llama2(torch.nn.Module):
def forward(self, input_ids, *args, **kwargs): def forward(self, input_ids, *args, **kwargs):
return self.model(input_ids, *args, **kwargs) return self.model(input_ids, *args, **kwargs)
class Llama2(BaseLlama, torch.nn.Module):
def __init__(self, config_dict, dtype, device, operations):
super().__init__()
config = Llama2Config(**config_dict)
self.num_layers = config.num_hidden_layers
self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
self.dtype = dtype
class Gemma2_2B(BaseLlama, torch.nn.Module):
def __init__(self, config_dict, dtype, device, operations):
super().__init__()
config = Gemma2_2B_Config(**config_dict)
self.num_layers = config.num_hidden_layers
self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
self.dtype = dtype
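
Note: the add=True RMSNorm variant models Gemma 2's convention of storing norm weights as an offset from 1, so the effective scale is (weight + 1) while Llama keeps using the weight directly. A minimal sketch with a local rms_norm helper standing in for comfy.ldm.common_dit.rms_norm:

import torch

def rms_norm(x, weight, eps=1e-6):
    norm = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
    return norm * weight

x = torch.randn(1, 4)
w = torch.zeros(4)  # a freshly initialized Gemma-style norm weight (offset form)
print(torch.allclose(rms_norm(x, w + 1.0), rms_norm(x, torch.ones(4))))  # True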

View File

@ -0,0 +1,44 @@
from comfy import sd1_clip
from .spiece_tokenizer import SPieceTokenizer
import comfy.text_encoders.llama
class Gemma2BTokenizer(sd1_clip.SDTokenizer):
def __init__(self, embedding_directory=None, tokenizer_data={}):
tokenizer = tokenizer_data.get("spiece_model", None)
super().__init__(tokenizer, pad_with_end=False, embedding_size=2304, embedding_key='gemma2_2b', tokenizer_class=SPieceTokenizer, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_args={"add_bos": True, "add_eos": False})
def state_dict(self):
return {"spiece_model": self.tokenizer.serialize_model()}
class LuminaTokenizer(sd1_clip.SD1Tokenizer):
def __init__(self, embedding_directory=None, tokenizer_data={}):
super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="gemma2_2b", tokenizer=Gemma2BTokenizer)
class Gemma2_2BModel(sd1_clip.SDClipModel):
def __init__(self, device="cpu", layer="hidden", layer_idx=-2, dtype=None, attention_mask=True, model_options={}):
llama_scaled_fp8 = model_options.get("llama_scaled_fp8", None)
if llama_scaled_fp8 is not None:
model_options = model_options.copy()
model_options["scaled_fp8"] = llama_scaled_fp8
super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 2, "pad": 0}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Gemma2_2B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
class LuminaModel(sd1_clip.SD1ClipModel):
def __init__(self, device="cpu", dtype=None, model_options={}):
super().__init__(device=device, dtype=dtype, name="gemma2_2b", clip_model=Gemma2_2BModel, model_options=model_options)
def te(dtype_llama=None, llama_scaled_fp8=None):
class LuminaTEModel_(LuminaModel):
def __init__(self, device="cpu", dtype=None, model_options={}):
if llama_scaled_fp8 is not None and "llama_scaled_fp8" not in model_options:
model_options = model_options.copy()
model_options["llama_scaled_fp8"] = llama_scaled_fp8
if dtype_llama is not None:
dtype = dtype_llama
super().__init__(device=device, dtype=dtype, model_options=model_options)
return LuminaTEModel_

View File

@ -1,21 +1,21 @@
import torch import torch
class SPieceTokenizer: class SPieceTokenizer:
add_eos = True
@staticmethod @staticmethod
def from_pretrained(path): def from_pretrained(path, **kwargs):
return SPieceTokenizer(path) return SPieceTokenizer(path, **kwargs)
def __init__(self, tokenizer_path): def __init__(self, tokenizer_path, add_bos=False, add_eos=True):
self.add_bos = add_bos
self.add_eos = add_eos
import sentencepiece import sentencepiece
if torch.is_tensor(tokenizer_path): if torch.is_tensor(tokenizer_path):
tokenizer_path = tokenizer_path.numpy().tobytes() tokenizer_path = tokenizer_path.numpy().tobytes()
if isinstance(tokenizer_path, bytes): if isinstance(tokenizer_path, bytes):
self.tokenizer = sentencepiece.SentencePieceProcessor(model_proto=tokenizer_path, add_eos=self.add_eos) self.tokenizer = sentencepiece.SentencePieceProcessor(model_proto=tokenizer_path, add_bos=self.add_bos, add_eos=self.add_eos)
else: else:
self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=tokenizer_path, add_eos=self.add_eos) self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=tokenizer_path, add_bos=self.add_bos, add_eos=self.add_eos)
def get_vocab(self): def get_vocab(self):
out = {} out = {}

View File

@ -203,7 +203,7 @@ class T5Stack(torch.nn.Module):
mask = None mask = None
if attention_mask is not None: if attention_mask is not None:
mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]) mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
mask = mask.masked_fill(mask.to(torch.bool), float("-inf")) mask = mask.masked_fill(mask.to(torch.bool), -torch.finfo(x.dtype).max)
intermediate = None intermediate = None
optimized_attention = optimized_attention_for_device(x.device, mask=attention_mask is not None, small_input=True) optimized_attention = optimized_attention_for_device(x.device, mask=attention_mask is not None, small_input=True)

View File

@ -43,13 +43,23 @@ if hasattr(torch.serialization, "add_safe_globals"): # TODO: this was added in
torch.serialization.add_safe_globals([ModelCheckpoint, scalar, dtype, Float64DType, encode]) torch.serialization.add_safe_globals([ModelCheckpoint, scalar, dtype, Float64DType, encode])
ALWAYS_SAFE_LOAD = True ALWAYS_SAFE_LOAD = True
logging.info("Checkpoint files will always be loaded safely.") logging.info("Checkpoint files will always be loaded safely.")
else:
logging.info("Warning, you are using an old pytorch version and some ckpt/pt files might be loaded unsafely. Upgrading to 2.4 or above is recommended.")
def load_torch_file(ckpt, safe_load=False, device=None): def load_torch_file(ckpt, safe_load=False, device=None):
if device is None: if device is None:
device = torch.device("cpu") device = torch.device("cpu")
if ckpt.lower().endswith(".safetensors") or ckpt.lower().endswith(".sft"): if ckpt.lower().endswith(".safetensors") or ckpt.lower().endswith(".sft"):
sd = safetensors.torch.load_file(ckpt, device=device.type) try:
sd = safetensors.torch.load_file(ckpt, device=device.type)
except Exception as e:
if len(e.args) > 0:
message = e.args[0]
if "HeaderTooLarge" in message:
raise ValueError("{}\n\nFile path: {}\n\nThe safetensors file is corrupt or invalid. Make sure this is actually a safetensors file and not a ckpt or pt or other filetype.".format(message, ckpt))
if "MetadataIncompleteBuffer" in message:
raise ValueError("{}\n\nFile path: {}\n\nThe safetensors file is corrupt/incomplete. Check the file size and make sure you have copied/downloaded it correctly.".format(message, ckpt))
raise e
else: else:
if safe_load or ALWAYS_SAFE_LOAD: if safe_load or ALWAYS_SAFE_LOAD:
pl_sd = torch.load(ckpt, map_location=device, weights_only=True) pl_sd = torch.load(ckpt, map_location=device, weights_only=True)

View File

@ -38,7 +38,26 @@ class FluxGuidance:
return (c, ) return (c, )
class FluxDisableGuidance:
@classmethod
def INPUT_TYPES(s):
return {"required": {
"conditioning": ("CONDITIONING", ),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "append"
CATEGORY = "advanced/conditioning/flux"
DESCRIPTION = "This node completely disables the guidance embed on Flux and Flux like models"
def append(self, conditioning):
c = node_helpers.conditioning_set_values(conditioning, {"guidance": None})
return (c, )
NODE_CLASS_MAPPINGS = { NODE_CLASS_MAPPINGS = {
"CLIPTextEncodeFlux": CLIPTextEncodeFlux, "CLIPTextEncodeFlux": CLIPTextEncodeFlux,
"FluxGuidance": FluxGuidance, "FluxGuidance": FluxGuidance,
"FluxDisableGuidance": FluxDisableGuidance,
} }
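
Note: FluxDisableGuidance works together with the model_base.py change earlier in this commit: it writes guidance=None into the conditioning, and Flux.extra_conds then skips the guidance embed entirely. A simplified stand-in for node_helpers.conditioning_set_values to illustrate the flow; the entry layout below only mirrors ComfyUI's (tensor, options) convention:

def conditioning_set_values(conditioning, values):
    out = []
    for tensor, opts in conditioning:
        out.append((tensor, {**opts, **values}))
    return out

cond = [("cond_tensor_placeholder", {"guidance": 3.5})]
print(conditioning_set_values(cond, {"guidance": None}))
# [('cond_tensor_placeholder', {'guidance': None})]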

View File

@ -2,10 +2,14 @@ import comfy.utils
import comfy_extras.nodes_post_processing import comfy_extras.nodes_post_processing
import torch import torch
def reshape_latent_to(target_shape, latent):
def reshape_latent_to(target_shape, latent, repeat_batch=True):
if latent.shape[1:] != target_shape[1:]: if latent.shape[1:] != target_shape[1:]:
latent = comfy.utils.common_upscale(latent, target_shape[3], target_shape[2], "bilinear", "center") latent = comfy.utils.common_upscale(latent, target_shape[-1], target_shape[-2], "bilinear", "center")
return comfy.utils.repeat_to_batch_size(latent, target_shape[0]) if repeat_batch:
return comfy.utils.repeat_to_batch_size(latent, target_shape[0])
else:
return latent
class LatentAdd: class LatentAdd:
@ -116,8 +120,7 @@ class LatentBatch:
s1 = samples1["samples"] s1 = samples1["samples"]
s2 = samples2["samples"] s2 = samples2["samples"]
if s1.shape[1:] != s2.shape[1:]: s2 = reshape_latent_to(s1.shape, s2, repeat_batch=False)
s2 = comfy.utils.common_upscale(s2, s1.shape[3], s1.shape[2], "bilinear", "center")
s = torch.cat((s1, s2), dim=0) s = torch.cat((s1, s2), dim=0)
samples_out["samples"] = s samples_out["samples"] = s
samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])]) samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])])

View File

@ -19,11 +19,7 @@ class Load3D():
"image": ("LOAD_3D", {}), "image": ("LOAD_3D", {}),
"width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), "width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"show_grid": ([True, False],),
"camera_type": (["perspective", "orthographic"],),
"view": (["front", "right", "top", "isometric"],),
"material": (["original", "normal", "wireframe", "depth"],), "material": (["original", "normal", "wireframe", "depth"],),
"bg_color": ("STRING", {"default": "#000000", "multiline": False}),
"light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}), "light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
"up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],), "up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
"fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}), "fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}),
@ -69,14 +65,9 @@ class Load3DAnimation():
"image": ("LOAD_3D_ANIMATION", {}), "image": ("LOAD_3D_ANIMATION", {}),
"width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), "width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"show_grid": ([True, False],),
"camera_type": (["perspective", "orthographic"],),
"view": (["front", "right", "top", "isometric"],),
"material": (["original", "normal", "wireframe", "depth"],), "material": (["original", "normal", "wireframe", "depth"],),
"bg_color": ("STRING", {"default": "#000000", "multiline": False}),
"light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}), "light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
"up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],), "up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
"animation_speed": (["0.1", "0.5", "1", "1.5", "2"], {"default": "1"}),
"fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}), "fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}),
}} }}
@ -109,11 +100,29 @@ class Preview3D():
def INPUT_TYPES(s): def INPUT_TYPES(s):
return {"required": { return {"required": {
"model_file": ("STRING", {"default": "", "multiline": False}), "model_file": ("STRING", {"default": "", "multiline": False}),
"show_grid": ([True, False],),
"camera_type": (["perspective", "orthographic"],),
"view": (["front", "right", "top", "isometric"],),
"material": (["original", "normal", "wireframe", "depth"],), "material": (["original", "normal", "wireframe", "depth"],),
"bg_color": ("STRING", {"default": "#000000", "multiline": False}), "light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
"up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
"fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}),
}}
OUTPUT_NODE = True
RETURN_TYPES = ()
CATEGORY = "3d"
FUNCTION = "process"
EXPERIMENTAL = True
def process(self, model_file, **kwargs):
return {"ui": {"model_file": [model_file]}, "result": ()}
class Preview3DAnimation():
@classmethod
def INPUT_TYPES(s):
return {"required": {
"model_file": ("STRING", {"default": "", "multiline": False}),
"material": (["original", "normal", "wireframe", "depth"],),
"light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}), "light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
"up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],), "up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
"fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}), "fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}),
@ -133,11 +142,13 @@ class Preview3D():
NODE_CLASS_MAPPINGS = { NODE_CLASS_MAPPINGS = {
"Load3D": Load3D, "Load3D": Load3D,
"Load3DAnimation": Load3DAnimation, "Load3DAnimation": Load3DAnimation,
"Preview3D": Preview3D "Preview3D": Preview3D,
"Preview3DAnimation": Preview3DAnimation
} }
NODE_DISPLAY_NAME_MAPPINGS = { NODE_DISPLAY_NAME_MAPPINGS = {
"Load3D": "Load 3D", "Load3D": "Load 3D",
"Load3DAnimation": "Load 3D - Animation", "Load3DAnimation": "Load 3D - Animation",
"Preview3D": "Preview 3D" "Preview3D": "Preview 3D",
"Preview3DAnimation": "Preview 3D - Animation"
} }

View File

@ -196,6 +196,54 @@ class ModelMergeLTXV(comfy_extras.nodes_model_merging.ModelMergeBlocks):
return {"required": arg_dict} return {"required": arg_dict}
class ModelMergeCosmos7B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
@classmethod
def INPUT_TYPES(s):
arg_dict = { "model1": ("MODEL",),
"model2": ("MODEL",)}
argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
arg_dict["pos_embedder."] = argument
arg_dict["extra_pos_embedder."] = argument
arg_dict["x_embedder."] = argument
arg_dict["t_embedder."] = argument
arg_dict["affline_norm."] = argument
for i in range(28):
arg_dict["blocks.block{}.".format(i)] = argument
arg_dict["final_layer."] = argument
return {"required": arg_dict}
class ModelMergeCosmos14B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
@classmethod
def INPUT_TYPES(s):
arg_dict = { "model1": ("MODEL",),
"model2": ("MODEL",)}
argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
arg_dict["pos_embedder."] = argument
arg_dict["extra_pos_embedder."] = argument
arg_dict["x_embedder."] = argument
arg_dict["t_embedder."] = argument
arg_dict["affline_norm."] = argument
for i in range(36):
arg_dict["blocks.block{}.".format(i)] = argument
arg_dict["final_layer."] = argument
return {"required": arg_dict}
NODE_CLASS_MAPPINGS = { NODE_CLASS_MAPPINGS = {
"ModelMergeSD1": ModelMergeSD1, "ModelMergeSD1": ModelMergeSD1,
"ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks "ModelMergeSD2": ModelMergeSD1, #SD1 and SD2 have the same blocks
@ -206,4 +254,6 @@ NODE_CLASS_MAPPINGS = {
"ModelMergeSD35_Large": ModelMergeSD35_Large, "ModelMergeSD35_Large": ModelMergeSD35_Large,
"ModelMergeMochiPreview": ModelMergeMochiPreview, "ModelMergeMochiPreview": ModelMergeMochiPreview,
"ModelMergeLTXV": ModelMergeLTXV, "ModelMergeLTXV": ModelMergeLTXV,
"ModelMergeCosmos7B": ModelMergeCosmos7B,
"ModelMergeCosmos14B": ModelMergeCosmos14B,
} }
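Both Cosmos merge nodes follow the same pattern as the other model-specific merge nodes: the two input models plus one FLOAT ratio in [0.0, 1.0] per weight-name prefix, with 28 transformer blocks for the 7B model and 36 for the 14B. A minimal sanity-check sketch; the import path is an assumption (the file name is not shown in this diff) and may differ in a custom setup:

from comfy_extras.nodes_model_merging_model_specific import ModelMergeCosmos7B  # assumed module path

inputs = ModelMergeCosmos7B.INPUT_TYPES()["required"]
assert "model1" in inputs and "model2" in inputs
# one FLOAT ratio per weight-name prefix
assert inputs["pos_embedder."] == ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
assert "blocks.block0." in inputs and "blocks.block27." in inputs   # 28 blocks on the 7B variant
assert "blocks.block28." not in inputs                              # the 14B variant goes up to blocks.block35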

View File

@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is # This file is automatically generated by the build process when version is
# updated in pyproject.toml. # updated in pyproject.toml.
__version__ = "0.3.12" __version__ = "0.3.14"

View File

@ -7,11 +7,18 @@ import logging
from typing import Literal from typing import Literal
from collections.abc import Collection from collections.abc import Collection
supported_pt_extensions: set[str] = {'.ckpt', '.pt', '.bin', '.pth', '.safetensors', '.pkl', '.sft'} from comfy.cli_args import args
supported_pt_extensions: set[str] = {'.ckpt', '.pt', '.pt2', '.bin', '.pth', '.safetensors', '.pkl', '.sft'}
folder_names_and_paths: dict[str, tuple[list[str], set[str]]] = {} folder_names_and_paths: dict[str, tuple[list[str], set[str]]] = {}
base_path = os.path.dirname(os.path.realpath(__file__)) # --base-directory - Resets all default paths configured in folder_paths with a new base path
if args.base_directory:
base_path = os.path.abspath(args.base_directory)
else:
base_path = os.path.dirname(os.path.realpath(__file__))
models_dir = os.path.join(base_path, "models") models_dir = os.path.join(base_path, "models")
folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_pt_extensions) folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_pt_extensions)
folder_names_and_paths["configs"] = ([os.path.join(models_dir, "configs")], [".yaml"]) folder_names_and_paths["configs"] = ([os.path.join(models_dir, "configs")], [".yaml"])
@ -39,10 +46,10 @@ folder_names_and_paths["photomaker"] = ([os.path.join(models_dir, "photomaker")]
folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""}) folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""})
output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output") output_directory = os.path.join(base_path, "output")
temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") temp_directory = os.path.join(base_path, "temp")
input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input") input_directory = os.path.join(base_path, "input")
user_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "user") user_directory = os.path.join(base_path, "user")
filename_list_cache: dict[str, tuple[list[str], dict[str, float], float]] = {} filename_list_cache: dict[str, tuple[list[str], dict[str, float], float]] = {}
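In effect, the new base_directory argument relocates every default folder in one step instead of anchoring them to the repository checkout. A short sketch of the resulting layout, using an illustrative path (the flag value and paths below are examples, not defaults):

import os

# as if launched with: python main.py --base-directory /data/comfy
base_directory = "/data/comfy"              # stands in for comfy.cli_args.args.base_directory
base_path = os.path.abspath(base_directory) if base_directory else os.path.dirname(os.path.realpath(__file__))

models_dir       = os.path.join(base_path, "models")    # /data/comfy/models
output_directory = os.path.join(base_path, "output")    # /data/comfy/output
temp_directory   = os.path.join(base_path, "temp")      # /data/comfy/temp
input_directory  = os.path.join(base_path, "input")     # /data/comfy/input
user_directory   = os.path.join(base_path, "user")      # /data/comfy/user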

View File

@ -12,7 +12,10 @@ MAX_PREVIEW_RESOLUTION = args.preview_size
def preview_to_image(latent_image): def preview_to_image(latent_image):
latents_ubyte = (((latent_image + 1.0) / 2.0).clamp(0, 1) # change scale from -1..1 to 0..1 latents_ubyte = (((latent_image + 1.0) / 2.0).clamp(0, 1) # change scale from -1..1 to 0..1
.mul(0xFF) # to 0..255 .mul(0xFF) # to 0..255
).to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(latent_image.device)) )
if comfy.model_management.directml_enabled:
latents_ubyte = latents_ubyte.to(dtype=torch.uint8)
latents_ubyte = latents_ubyte.to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(latent_image.device))
return Image.fromarray(latents_ubyte.numpy()) return Image.fromarray(latents_ubyte.numpy())

View File

@ -138,6 +138,8 @@ import server
from server import BinaryEventTypes from server import BinaryEventTypes
import nodes import nodes
import comfy.model_management import comfy.model_management
import comfyui_version
def cuda_malloc_warning(): def cuda_malloc_warning():
device = comfy.model_management.get_torch_device() device = comfy.model_management.get_torch_device()
@ -292,6 +294,7 @@ def start_comfyui(asyncio_loop=None):
if __name__ == "__main__": if __name__ == "__main__":
# Running directly, just start ComfyUI. # Running directly, just start ComfyUI.
logging.info("ComfyUI version: {}".format(comfyui_version.__version__))
event_loop, _, start_all_func = start_comfyui() event_loop, _, start_all_func = start_comfyui()
try: try:
event_loop.run_until_complete(start_all_func()) event_loop.run_until_complete(start_all_func())

View File

@ -63,6 +63,8 @@ class CLIPTextEncode(ComfyNodeABC):
DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images." DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."
def encode(self, clip, text): def encode(self, clip, text):
if clip is None:
raise RuntimeError("ERROR: clip input is invalid: None\n\nIf the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model.")
tokens = clip.tokenize(text) tokens = clip.tokenize(text)
return (clip.encode_from_tokens_scheduled(tokens), ) return (clip.encode_from_tokens_scheduled(tokens), )
@ -912,7 +914,7 @@ class CLIPLoader:
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ), return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ),
"type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos"], ), "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2"], ),
}, },
"optional": { "optional": {
"device": (["default", "cpu"], {"advanced": True}), "device": (["default", "cpu"], {"advanced": True}),
@ -922,7 +924,7 @@ class CLIPLoader:
CATEGORY = "advanced/loaders" CATEGORY = "advanced/loaders"
DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 / clip-g / clip-l\nstable_audio: t5\nmochi: t5\ncosmos: old t5 xxl" DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 / clip-g / clip-l\nstable_audio: t5\nmochi: t5\ncosmos: old t5 xxl\nlumina2: gemma 2 2B"
def load_clip(self, clip_name, type="stable_diffusion", device="default"): def load_clip(self, clip_name, type="stable_diffusion", device="default"):
if type == "stable_cascade": if type == "stable_cascade":
@ -937,6 +939,10 @@ class CLIPLoader:
clip_type = comfy.sd.CLIPType.LTXV clip_type = comfy.sd.CLIPType.LTXV
elif type == "pixart": elif type == "pixart":
clip_type = comfy.sd.CLIPType.PIXART clip_type = comfy.sd.CLIPType.PIXART
elif type == "cosmos":
clip_type = comfy.sd.CLIPType.COSMOS
elif type == "lumina2":
clip_type = comfy.sd.CLIPType.LUMINA2
else: else:
clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION
@ -1058,10 +1064,11 @@ class StyleModelApply:
for t in conditioning: for t in conditioning:
(txt, keys) = t (txt, keys) = t
keys = keys.copy() keys = keys.copy()
if strength_type == "attn_bias" and strength != 1.0: # even if the strength is 1.0 (i.e., no change), if there's already a mask, we have to add to it
if "attention_mask" in keys or (strength_type == "attn_bias" and strength != 1.0):
# math.log raises an error if the argument is zero # math.log raises an error if the argument is zero
# torch.log returns -inf, which is what we want # torch.log returns -inf, which is what we want
attn_bias = torch.log(torch.Tensor([strength])) attn_bias = torch.log(torch.Tensor([strength if strength_type == "attn_bias" else 1.0]))
# get the size of the mask image # get the size of the mask image
mask_ref_size = keys.get("attention_mask_img_shape", (1, 1)) mask_ref_size = keys.get("attention_mask_img_shape", (1, 1))
n_ref = mask_ref_size[0] * mask_ref_size[1] n_ref = mask_ref_size[0] * mask_ref_size[1]
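The attn_bias comment above hinges on the different zero handling of math.log and torch.log: a strength of 0 should turn into a -inf attention bias rather than raise. A tiny standalone check in plain PyTorch:

import math
import torch

strength = 0.0

try:
    math.log(strength)                      # raises ValueError: math domain error
except ValueError as e:
    print("math.log:", e)

attn_bias = torch.log(torch.Tensor([strength]))
print(attn_bias)                            # tensor([-inf]), the desired "fully masked" bias
print(torch.isinf(attn_bias).item())        # True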

View File

@ -1,6 +1,6 @@
[project] [project]
name = "ComfyUI" name = "ComfyUI"
version = "0.3.12" version = "0.3.14"
readme = "README.md" readme = "README.md"
license = { file = "LICENSE" } license = { file = "LICENSE" }
requires-python = ">=3.9" requires-python = ">=3.9"

View File

@ -52,6 +52,20 @@ async def cache_control(request: web.Request, handler):
response.headers.setdefault('Cache-Control', 'no-cache') response.headers.setdefault('Cache-Control', 'no-cache')
return response return response
@web.middleware
async def compress_body(request: web.Request, handler):
accept_encoding = request.headers.get("Accept-Encoding", "")
response: web.Response = await handler(request)
if not isinstance(response, web.Response):
return response
if response.content_type not in ["application/json", "text/plain"]:
return response
if response.body and "gzip" in accept_encoding:
response.enable_compression()
return response
def create_cors_middleware(allowed_origin: str): def create_cors_middleware(allowed_origin: str):
@web.middleware @web.middleware
async def cors_middleware(request: web.Request, handler): async def cors_middleware(request: web.Request, handler):
@ -136,7 +150,8 @@ class PromptServer():
PromptServer.instance = self PromptServer.instance = self
mimetypes.init() mimetypes.init()
mimetypes.types_map['.js'] = 'application/javascript; charset=utf-8' mimetypes.add_type('application/javascript; charset=utf-8', '.js')
mimetypes.add_type('image/webp', '.webp')
self.user_manager = UserManager() self.user_manager = UserManager()
self.model_file_manager = ModelFileManager() self.model_file_manager = ModelFileManager()
@ -150,6 +165,9 @@ class PromptServer():
self.number = 0 self.number = 0
middlewares = [cache_control] middlewares = [cache_control]
if args.enable_compress_response_body:
middlewares.append(compress_body)
if args.enable_cors_header: if args.enable_cors_header:
middlewares.append(create_cors_middleware(args.enable_cors_header)) middlewares.append(create_cors_middleware(args.enable_cors_header))
else: else:
@ -329,6 +347,9 @@ class PromptServer():
original_ref = json.loads(post.get("original_ref")) original_ref = json.loads(post.get("original_ref"))
filename, output_dir = folder_paths.annotated_filepath(original_ref['filename']) filename, output_dir = folder_paths.annotated_filepath(original_ref['filename'])
if not filename:
return web.Response(status=400)
# validation for security: prevent accessing arbitrary path # validation for security: prevent accessing arbitrary path
if filename[0] == '/' or '..' in filename: if filename[0] == '/' or '..' in filename:
return web.Response(status=400) return web.Response(status=400)
@ -370,6 +391,9 @@ class PromptServer():
filename = request.rel_url.query["filename"] filename = request.rel_url.query["filename"]
filename,output_dir = folder_paths.annotated_filepath(filename) filename,output_dir = folder_paths.annotated_filepath(filename)
if not filename:
return web.Response(status=400)
# validation for security: prevent accessing arbitrary path # validation for security: prevent accessing arbitrary path
if filename[0] == '/' or '..' in filename: if filename[0] == '/' or '..' in filename:
return web.Response(status=400) return web.Response(status=400)
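The compress_body middleware added above only compresses JSON and plain-text responses, only when the client advertises gzip, and only when args.enable_compress_response_body is set. A self-contained sketch of the same idea on a bare aiohttp app; the route, payload, and port are illustrative and not part of ComfyUI:

from aiohttp import web

@web.middleware
async def compress_body(request: web.Request, handler):
    response = await handler(request)
    if not isinstance(response, web.Response):
        return response
    if response.content_type not in ("application/json", "text/plain"):
        return response
    if response.body and "gzip" in request.headers.get("Accept-Encoding", ""):
        response.enable_compression()       # aiohttp emits Content-Encoding: gzip
    return response

async def hello(request):
    return web.json_response({"msg": "hello " * 500})

app = web.Application(middlewares=[compress_body])
app.add_routes([web.get("/hello", hello)])
# web.run_app(app, port=8188)
# check with: curl -sv -H 'Accept-Encoding: gzip' http://127.0.0.1:8188/hello -o /dev/null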

View File

@ -2,39 +2,146 @@ import pytest
from aiohttp import web from aiohttp import web
from unittest.mock import patch from unittest.mock import patch
from app.custom_node_manager import CustomNodeManager from app.custom_node_manager import CustomNodeManager
import json
pytestmark = ( pytestmark = (
pytest.mark.asyncio pytest.mark.asyncio
) # This applies the asyncio mark to all test functions in the module ) # This applies the asyncio mark to all test functions in the module
@pytest.fixture @pytest.fixture
def custom_node_manager(): def custom_node_manager():
return CustomNodeManager() return CustomNodeManager()
@pytest.fixture @pytest.fixture
def app(custom_node_manager): def app(custom_node_manager):
app = web.Application() app = web.Application()
routes = web.RouteTableDef() routes = web.RouteTableDef()
custom_node_manager.add_routes(routes, app, [("ComfyUI-TestExtension1", "ComfyUI-TestExtension1")]) custom_node_manager.add_routes(
routes, app, [("ComfyUI-TestExtension1", "ComfyUI-TestExtension1")]
)
app.add_routes(routes) app.add_routes(routes)
return app return app
async def test_get_workflow_templates(aiohttp_client, app, tmp_path): async def test_get_workflow_templates(aiohttp_client, app, tmp_path):
client = await aiohttp_client(app) client = await aiohttp_client(app)
# Setup temporary custom nodes file structure with 1 workflow file # Setup temporary custom nodes file structure with 1 workflow file
custom_nodes_dir = tmp_path / "custom_nodes" custom_nodes_dir = tmp_path / "custom_nodes"
example_workflows_dir = custom_nodes_dir / "ComfyUI-TestExtension1" / "example_workflows" example_workflows_dir = (
custom_nodes_dir / "ComfyUI-TestExtension1" / "example_workflows"
)
example_workflows_dir.mkdir(parents=True) example_workflows_dir.mkdir(parents=True)
template_file = example_workflows_dir / "workflow1.json" template_file = example_workflows_dir / "workflow1.json"
template_file.write_text('') template_file.write_text("")
with patch('folder_paths.folder_names_and_paths', { with patch(
'custom_nodes': ([str(custom_nodes_dir)], None) "folder_paths.folder_names_and_paths",
}): {"custom_nodes": ([str(custom_nodes_dir)], None)},
response = await client.get('/workflow_templates') ):
response = await client.get("/workflow_templates")
assert response.status == 200 assert response.status == 200
workflows_dict = await response.json() workflows_dict = await response.json()
assert isinstance(workflows_dict, dict) assert isinstance(workflows_dict, dict)
assert "ComfyUI-TestExtension1" in workflows_dict assert "ComfyUI-TestExtension1" in workflows_dict
assert isinstance(workflows_dict["ComfyUI-TestExtension1"], list) assert isinstance(workflows_dict["ComfyUI-TestExtension1"], list)
assert workflows_dict["ComfyUI-TestExtension1"][0] == "workflow1" assert workflows_dict["ComfyUI-TestExtension1"][0] == "workflow1"
async def test_build_translations_empty_when_no_locales(custom_node_manager, tmp_path):
custom_nodes_dir = tmp_path / "custom_nodes"
custom_nodes_dir.mkdir(parents=True)
with patch("folder_paths.get_folder_paths", return_value=[str(custom_nodes_dir)]):
translations = custom_node_manager.build_translations()
assert translations == {}
async def test_build_translations_loads_all_files(custom_node_manager, tmp_path):
# Setup test directory structure
custom_nodes_dir = tmp_path / "custom_nodes" / "test-extension"
locales_dir = custom_nodes_dir / "locales" / "en"
locales_dir.mkdir(parents=True)
# Create test translation files
main_content = {"title": "Test Extension"}
(locales_dir / "main.json").write_text(json.dumps(main_content))
node_defs = {"node1": "Node 1"}
(locales_dir / "nodeDefs.json").write_text(json.dumps(node_defs))
commands = {"cmd1": "Command 1"}
(locales_dir / "commands.json").write_text(json.dumps(commands))
settings = {"setting1": "Setting 1"}
(locales_dir / "settings.json").write_text(json.dumps(settings))
with patch(
"folder_paths.get_folder_paths", return_value=[tmp_path / "custom_nodes"]
):
translations = custom_node_manager.build_translations()
assert translations == {
"en": {
"title": "Test Extension",
"nodeDefs": {"node1": "Node 1"},
"commands": {"cmd1": "Command 1"},
"settings": {"setting1": "Setting 1"},
}
}
async def test_build_translations_handles_invalid_json(custom_node_manager, tmp_path):
# Setup test directory structure
custom_nodes_dir = tmp_path / "custom_nodes" / "test-extension"
locales_dir = custom_nodes_dir / "locales" / "en"
locales_dir.mkdir(parents=True)
# Create valid main.json
main_content = {"title": "Test Extension"}
(locales_dir / "main.json").write_text(json.dumps(main_content))
# Create invalid JSON file
(locales_dir / "nodeDefs.json").write_text("invalid json{")
with patch(
"folder_paths.get_folder_paths", return_value=[tmp_path / "custom_nodes"]
):
translations = custom_node_manager.build_translations()
assert translations == {
"en": {
"title": "Test Extension",
}
}
async def test_build_translations_merges_multiple_extensions(
custom_node_manager, tmp_path
):
# Setup test directory structure for two extensions
custom_nodes_dir = tmp_path / "custom_nodes"
ext1_dir = custom_nodes_dir / "extension1" / "locales" / "en"
ext2_dir = custom_nodes_dir / "extension2" / "locales" / "en"
ext1_dir.mkdir(parents=True)
ext2_dir.mkdir(parents=True)
# Create translation files for extension 1
ext1_main = {"title": "Extension 1", "shared": "Original"}
(ext1_dir / "main.json").write_text(json.dumps(ext1_main))
# Create translation files for extension 2
ext2_main = {"description": "Extension 2", "shared": "Override"}
(ext2_dir / "main.json").write_text(json.dumps(ext2_main))
with patch("folder_paths.get_folder_paths", return_value=[str(custom_nodes_dir)]):
translations = custom_node_manager.build_translations()
assert translations == {
"en": {
"title": "Extension 1",
"description": "Extension 2",
"shared": "Override", # Second extension should override first
}
}

View File

@ -1,19 +1,23 @@
### 🗻 This file is created through the spirit of Mount Fuji at its peak ### 🗻 This file is created through the spirit of Mount Fuji at its peak
# TODO(yoland): clean up this after I get back down # TODO(yoland): clean up this after I get back down
import sys
import pytest import pytest
import os import os
import tempfile import tempfile
from unittest.mock import patch from unittest.mock import patch
from importlib import reload
import folder_paths import folder_paths
import comfy.cli_args
from comfy.options import enable_args_parsing
enable_args_parsing()
@pytest.fixture() @pytest.fixture()
def clear_folder_paths(): def clear_folder_paths():
# Clear the global dictionary before each test to ensure isolation # Reload the module after each test to ensure isolation
original = folder_paths.folder_names_and_paths.copy()
folder_paths.folder_names_and_paths.clear()
yield yield
folder_paths.folder_names_and_paths = original reload(folder_paths)
@pytest.fixture @pytest.fixture
def temp_dir(): def temp_dir():
@ -21,7 +25,21 @@ def temp_dir():
yield tmpdirname yield tmpdirname
def test_get_directory_by_type(): @pytest.fixture
def set_base_dir():
def _set_base_dir(base_dir):
# Mock CLI args
with patch.object(sys, 'argv', ["main.py", "--base-directory", base_dir]):
reload(comfy.cli_args)
reload(folder_paths)
yield _set_base_dir
# Reload the modules after each test to ensure isolation
with patch.object(sys, 'argv', ["main.py"]):
reload(comfy.cli_args)
reload(folder_paths)
def test_get_directory_by_type(clear_folder_paths):
test_dir = "/test/dir" test_dir = "/test/dir"
folder_paths.set_output_directory(test_dir) folder_paths.set_output_directory(test_dir)
assert folder_paths.get_directory_by_type("output") == test_dir assert folder_paths.get_directory_by_type("output") == test_dir
@ -96,3 +114,49 @@ def test_get_save_image_path(temp_dir):
assert counter == 1 assert counter == 1
assert subfolder == "" assert subfolder == ""
assert filename_prefix == "test" assert filename_prefix == "test"
def test_base_path_changes(set_base_dir):
test_dir = os.path.abspath("/test/dir")
set_base_dir(test_dir)
assert folder_paths.base_path == test_dir
assert folder_paths.models_dir == os.path.join(test_dir, "models")
assert folder_paths.input_directory == os.path.join(test_dir, "input")
assert folder_paths.output_directory == os.path.join(test_dir, "output")
assert folder_paths.temp_directory == os.path.join(test_dir, "temp")
assert folder_paths.user_directory == os.path.join(test_dir, "user")
assert os.path.join(test_dir, "custom_nodes") in folder_paths.get_folder_paths("custom_nodes")
for name in ["checkpoints", "loras", "vae", "configs", "embeddings", "controlnet", "classifiers"]:
assert folder_paths.get_folder_paths(name)[0] == os.path.join(test_dir, "models", name)
def test_base_path_change_clears_old(set_base_dir):
test_dir = os.path.abspath("/test/dir")
set_base_dir(test_dir)
assert len(folder_paths.get_folder_paths("custom_nodes")) == 1
single_model_paths = [
"checkpoints",
"loras",
"vae",
"configs",
"clip_vision",
"style_models",
"diffusers",
"vae_approx",
"gligen",
"upscale_models",
"embeddings",
"hypernetworks",
"photomaker",
"classifiers",
]
for name in single_model_paths:
assert len(folder_paths.get_folder_paths(name)) == 1
for name in ["controlnet", "diffusion_models", "text_encoders"]:
assert len(folder_paths.get_folder_paths(name)) == 2

View File

@ -1,115 +0,0 @@
import pytest
from aiohttp import web
from unittest.mock import MagicMock, patch
from api_server.routes.internal.internal_routes import InternalRoutes
from api_server.services.file_service import FileService
from folder_paths import models_dir, user_directory, output_directory
@pytest.fixture
def internal_routes():
return InternalRoutes(None)
@pytest.fixture
def aiohttp_client_factory(aiohttp_client, internal_routes):
async def _get_client():
app = internal_routes.get_app()
return await aiohttp_client(app)
return _get_client
@pytest.mark.asyncio
async def test_list_files_valid_directory(aiohttp_client_factory, internal_routes):
mock_file_list = [
{"name": "file1.txt", "path": "file1.txt", "type": "file", "size": 100},
{"name": "dir1", "path": "dir1", "type": "directory"}
]
internal_routes.file_service.list_files = MagicMock(return_value=mock_file_list)
client = await aiohttp_client_factory()
resp = await client.get('/files?directory=models')
assert resp.status == 200
data = await resp.json()
assert 'files' in data
assert len(data['files']) == 2
assert data['files'] == mock_file_list
# Check other valid directories
resp = await client.get('/files?directory=user')
assert resp.status == 200
resp = await client.get('/files?directory=output')
assert resp.status == 200
@pytest.mark.asyncio
async def test_list_files_invalid_directory(aiohttp_client_factory, internal_routes):
internal_routes.file_service.list_files = MagicMock(side_effect=ValueError("Invalid directory key"))
client = await aiohttp_client_factory()
resp = await client.get('/files?directory=invalid')
assert resp.status == 400
data = await resp.json()
assert 'error' in data
assert data['error'] == "Invalid directory key"
@pytest.mark.asyncio
async def test_list_files_exception(aiohttp_client_factory, internal_routes):
internal_routes.file_service.list_files = MagicMock(side_effect=Exception("Unexpected error"))
client = await aiohttp_client_factory()
resp = await client.get('/files?directory=models')
assert resp.status == 500
data = await resp.json()
assert 'error' in data
assert data['error'] == "Unexpected error"
@pytest.mark.asyncio
async def test_list_files_no_directory_param(aiohttp_client_factory, internal_routes):
mock_file_list = []
internal_routes.file_service.list_files = MagicMock(return_value=mock_file_list)
client = await aiohttp_client_factory()
resp = await client.get('/files')
assert resp.status == 200
data = await resp.json()
assert 'files' in data
assert len(data['files']) == 0
def test_setup_routes(internal_routes):
internal_routes.setup_routes()
routes = internal_routes.routes
assert any(route.method == 'GET' and str(route.path) == '/files' for route in routes)
def test_get_app(internal_routes):
app = internal_routes.get_app()
assert isinstance(app, web.Application)
assert internal_routes._app is not None
def test_get_app_reuse(internal_routes):
app1 = internal_routes.get_app()
app2 = internal_routes.get_app()
assert app1 is app2
@pytest.mark.asyncio
async def test_routes_added_to_app(aiohttp_client_factory, internal_routes):
client = await aiohttp_client_factory()
try:
resp = await client.get('/files')
print(f"Response received: status {resp.status}") # noqa: T201
except Exception as e:
print(f"Exception occurred during GET request: {e}") # noqa: T201
raise
assert resp.status != 404, "Route /files does not exist"
@pytest.mark.asyncio
async def test_file_service_initialization():
with patch('api_server.routes.internal.internal_routes.FileService') as MockFileService:
# Create a mock instance
mock_file_service_instance = MagicMock(spec=FileService)
MockFileService.return_value = mock_file_service_instance
internal_routes = InternalRoutes(None)
# Check if FileService was initialized with the correct parameters
MockFileService.assert_called_once_with({
"models": models_dir,
"user": user_directory,
"output": output_directory
})
# Verify that the file_service attribute of InternalRoutes is set
assert internal_routes.file_service == mock_file_service_instance

View File

@ -1,54 +0,0 @@
import pytest
from unittest.mock import MagicMock
from api_server.services.file_service import FileService
@pytest.fixture
def mock_file_system_ops():
return MagicMock()
@pytest.fixture
def file_service(mock_file_system_ops):
allowed_directories = {
"models": "/path/to/models",
"user": "/path/to/user",
"output": "/path/to/output"
}
return FileService(allowed_directories, file_system_ops=mock_file_system_ops)
def test_list_files_valid_directory(file_service, mock_file_system_ops):
mock_file_system_ops.walk_directory.return_value = [
{"name": "file1.txt", "path": "file1.txt", "type": "file", "size": 100},
{"name": "dir1", "path": "dir1", "type": "directory"}
]
result = file_service.list_files("models")
assert len(result) == 2
assert result[0]["name"] == "file1.txt"
assert result[1]["name"] == "dir1"
mock_file_system_ops.walk_directory.assert_called_once_with("/path/to/models")
def test_list_files_invalid_directory(file_service):
# Does not support walking directories outside of the allowed directories
with pytest.raises(ValueError, match="Invalid directory key"):
file_service.list_files("invalid_key")
def test_list_files_empty_directory(file_service, mock_file_system_ops):
mock_file_system_ops.walk_directory.return_value = []
result = file_service.list_files("models")
assert len(result) == 0
mock_file_system_ops.walk_directory.assert_called_once_with("/path/to/models")
@pytest.mark.parametrize("directory_key", ["models", "user", "output"])
def test_list_files_all_allowed_directories(file_service, mock_file_system_ops, directory_key):
mock_file_system_ops.walk_directory.return_value = [
{"name": f"file_{directory_key}.txt", "path": f"file_{directory_key}.txt", "type": "file", "size": 100}
]
result = file_service.list_files(directory_key)
assert len(result) == 1
assert result[0]["name"] == f"file_{directory_key}.txt"
mock_file_system_ops.walk_directory.assert_called_once_with(f"/path/to/{directory_key}")

View File

@ -0,0 +1,71 @@
from utils.json_util import merge_json_recursive
def test_merge_simple_dicts():
base = {"a": 1, "b": 2}
update = {"b": 3, "c": 4}
expected = {"a": 1, "b": 3, "c": 4}
assert merge_json_recursive(base, update) == expected
def test_merge_nested_dicts():
base = {"a": {"x": 1, "y": 2}, "b": 3}
update = {"a": {"y": 4, "z": 5}}
expected = {"a": {"x": 1, "y": 4, "z": 5}, "b": 3}
assert merge_json_recursive(base, update) == expected
def test_merge_lists():
base = {"a": [1, 2], "b": 3}
update = {"a": [3, 4]}
expected = {"a": [1, 2, 3, 4], "b": 3}
assert merge_json_recursive(base, update) == expected
def test_merge_nested_lists():
base = {"a": {"x": [1, 2]}}
update = {"a": {"x": [3, 4]}}
expected = {"a": {"x": [1, 2, 3, 4]}}
assert merge_json_recursive(base, update) == expected
def test_merge_mixed_types():
base = {"a": [1, 2], "b": {"x": 1}}
update = {"a": [3], "b": {"y": 2}}
expected = {"a": [1, 2, 3], "b": {"x": 1, "y": 2}}
assert merge_json_recursive(base, update) == expected
def test_merge_overwrite_non_dict():
base = {"a": 1}
update = {"a": {"x": 2}}
expected = {"a": {"x": 2}}
assert merge_json_recursive(base, update) == expected
def test_merge_empty_dicts():
base = {}
update = {"a": 1}
expected = {"a": 1}
assert merge_json_recursive(base, update) == expected
def test_merge_none_values():
base = {"a": None}
update = {"a": {"x": 1}}
expected = {"a": {"x": 1}}
assert merge_json_recursive(base, update) == expected
def test_merge_different_types():
base = {"a": [1, 2]}
update = {"a": "string"}
expected = {"a": "string"}
assert merge_json_recursive(base, update) == expected
def test_merge_complex_nested():
base = {"a": [1, 2], "b": {"x": [3, 4], "y": {"p": 1}}}
update = {"a": [5], "b": {"x": [6], "y": {"q": 2}}}
expected = {"a": [1, 2, 5], "b": {"x": [3, 4, 6], "y": {"p": 1, "q": 2}}}
assert merge_json_recursive(base, update) == expected

utils/json_util.py (Normal file, 26 lines)
View File

@ -0,0 +1,26 @@
def merge_json_recursive(base, update):
"""Recursively merge two JSON-like objects.
- Dictionaries are merged recursively
- Lists are concatenated
- Other types are overwritten by the update value
Args:
base: Base JSON-like object
update: Update JSON-like object to merge into base
Returns:
Merged JSON-like object
"""
if not isinstance(base, dict) or not isinstance(update, dict):
if isinstance(base, list) and isinstance(update, list):
return base + update
return update
merged = base.copy()
for key, value in update.items():
if key in merged:
merged[key] = merge_json_recursive(merged[key], value)
else:
merged[key] = value
return merged
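The tests added above exercise exactly these rules; as a quick standalone illustration (same import path as the tests use):

from utils.json_util import merge_json_recursive

base = {"en": {"title": "Ext 1", "nodeDefs": {"a": "A"}, "tags": ["x"]}}
update = {"en": {"title": "Ext 2", "tags": ["y"]}, "zh": {"title": "Ext 2 zh"}}

merged = merge_json_recursive(base, update)
# dicts merge key by key, lists concatenate, scalars are overwritten by `update`:
# {"en": {"title": "Ext 2", "nodeDefs": {"a": "A"}, "tags": ["x", "y"]},
#  "zh": {"title": "Ext 2 zh"}}
print(merged)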

View File

@ -1,23 +0,0 @@
import { d as defineComponent, o as openBlock, f as createElementBlock, J as renderSlot, T as normalizeClass } from "./index-DjNHn37O.js";
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "BaseViewTemplate",
props: {
dark: { type: Boolean, default: false }
},
setup(__props) {
const props = __props;
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", {
class: normalizeClass(["font-sans w-screen h-screen flex items-center justify-center pointer-events-auto overflow-auto", [
props.dark ? "text-neutral-300 bg-neutral-900 dark-theme" : "text-neutral-900 bg-neutral-300"
]])
}, [
renderSlot(_ctx.$slots, "default")
], 2);
};
}
});
export {
_sfc_main as _
};
//# sourceMappingURL=BaseViewTemplate-BNGF4K22.js.map

web/assets/BaseViewTemplate-Cz111_1A.js (generated vendored Normal file, 51 lines)
View File

@ -0,0 +1,51 @@
import { d as defineComponent, U as ref, p as onMounted, b4 as isElectron, W as nextTick, b5 as electronAPI, o as openBlock, f as createElementBlock, i as withDirectives, v as vShow, j as unref, b6 as isNativeWindow, m as createBaseVNode, A as renderSlot, ai as normalizeClass } from "./index-DqqhYDnY.js";
const _hoisted_1 = { class: "flex-grow w-full flex items-center justify-center overflow-auto" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "BaseViewTemplate",
props: {
dark: { type: Boolean, default: false }
},
setup(__props) {
const props = __props;
const darkTheme = {
color: "rgba(0, 0, 0, 0)",
symbolColor: "#d4d4d4"
};
const lightTheme = {
color: "rgba(0, 0, 0, 0)",
symbolColor: "#171717"
};
const topMenuRef = ref(null);
onMounted(async () => {
if (isElectron()) {
await nextTick();
electronAPI().changeTheme({
...props.dark ? darkTheme : lightTheme,
height: topMenuRef.value.getBoundingClientRect().height
});
}
});
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", {
class: normalizeClass(["font-sans w-screen h-screen flex flex-col pointer-events-auto", [
props.dark ? "text-neutral-300 bg-neutral-900 dark-theme" : "text-neutral-900 bg-neutral-300"
]])
}, [
withDirectives(createBaseVNode("div", {
ref_key: "topMenuRef",
ref: topMenuRef,
class: "app-drag w-full h-[var(--comfy-topbar-height)]"
}, null, 512), [
[vShow, unref(isNativeWindow)()]
]),
createBaseVNode("div", _hoisted_1, [
renderSlot(_ctx.$slots, "default")
])
], 2);
};
}
});
export {
_sfc_main as _
};
//# sourceMappingURL=BaseViewTemplate-Cz111_1A.js.map

web/assets/DesktopStartView-FKlxS2Lt.js (generated vendored Normal file, 22 lines)
View File

@ -0,0 +1,22 @@
import { d as defineComponent, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, k as createVNode, j as unref, bz as script } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1 = { class: "max-w-screen-sm w-screen p-8" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "DesktopStartView",
setup(__props) {
return (_ctx, _cache) => {
return openBlock(), createBlock(_sfc_main$1, { dark: "" }, {
default: withCtx(() => [
createBaseVNode("div", _hoisted_1, [
createVNode(unref(script), { mode: "indeterminate" })
])
]),
_: 1
});
};
}
});
export {
_sfc_main as default
};
//# sourceMappingURL=DesktopStartView-FKlxS2Lt.js.map

View File

@ -1,7 +1,7 @@
var __defProp = Object.defineProperty; var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true }); var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, X as toDisplayString, N as createVNode, j as unref, l as script, bW as useRouter } from "./index-DjNHn37O.js"; import { d as defineComponent, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, l as script, be as useRouter } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js"; import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1 = { class: "max-w-screen-sm flex flex-col gap-8 p-8 bg-[url('/assets/images/Git-Logo-White.svg')] bg-no-repeat bg-right-top bg-origin-padding" }; const _hoisted_1 = { class: "max-w-screen-sm flex flex-col gap-8 p-8 bg-[url('/assets/images/Git-Logo-White.svg')] bg-no-repeat bg-right-top bg-origin-padding" };
const _hoisted_2 = { class: "mt-24 text-4xl font-bold text-red-500" }; const _hoisted_2 = { class: "mt-24 text-4xl font-bold text-red-500" };
const _hoisted_3 = { class: "space-y-4" }; const _hoisted_3 = { class: "space-y-4" };
@ -55,4 +55,4 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export { export {
_sfc_main as default _sfc_main as default
}; };
//# sourceMappingURL=DownloadGitView-DeC7MBzG.js.map //# sourceMappingURL=DownloadGitView-DVXUne-M.js.map

View File

@ -1,9 +1,8 @@
var __defProp = Object.defineProperty; var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true }); var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, ab as ref, cn as FilterMatchMode, cs as useExtensionStore, a as useSettingStore, m as onMounted, c as computed, o as openBlock, k as createBlock, M as withCtx, N as createVNode, co as SearchBox, j as unref, bZ as script, H as createBaseVNode, f as createElementBlock, E as renderList, X as toDisplayString, aE as createTextVNode, F as Fragment, l as script$1, I as createCommentVNode, aI as script$3, bO as script$4, c4 as script$5, cp as _sfc_main$1 } from "./index-DjNHn37O.js"; import { d as defineComponent, U as ref, dl as FilterMatchMode, dr as useExtensionStore, a as useSettingStore, p as onMounted, c as computed, o as openBlock, y as createBlock, z as withCtx, k as createVNode, dm as SearchBox, j as unref, bj as script, m as createBaseVNode, f as createElementBlock, D as renderList, E as toDisplayString, a7 as createTextVNode, F as Fragment, l as script$1, B as createCommentVNode, a4 as script$3, ax as script$4, bn as script$5, dn as _sfc_main$1 } from "./index-DqqhYDnY.js";
import { s as script$2, a as script$6 } from "./index-B5F0uxTQ.js"; import { g as script$2, h as script$6 } from "./index-BapOFhAR.js";
import "./index-B-aVupP5.js"; import "./index-DXE47DZl.js";
import "./index-5HFeZax4.js";
const _hoisted_1 = { class: "flex justify-end" }; const _hoisted_1 = { class: "flex justify-end" };
const _sfc_main = /* @__PURE__ */ defineComponent({ const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "ExtensionPanel", __name: "ExtensionPanel",
@ -180,4 +179,4 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export { export {
_sfc_main as default _sfc_main as default
}; };
//# sourceMappingURL=ExtensionPanel-D4Phn0Zr.js.map //# sourceMappingURL=ExtensionPanel-iPOrhDVM.js.map

View File

@ -1,8 +1,10 @@
.comfy-menu-hamburger[data-v-5661bed0] { .comfy-menu-hamburger[data-v-7ed57d1a] {
pointer-events: auto; pointer-events: auto;
position: fixed; position: fixed;
z-index: 9999; z-index: 9999;
display: flex;
flex-direction: row
} }
[data-v-e50caa15] .p-splitter-gutter { [data-v-e50caa15] .p-splitter-gutter {
@ -39,14 +41,14 @@
z-index: 999; z-index: 999;
} }
.p-buttongroup-vertical[data-v-cf40dd39] { .p-buttongroup-vertical[data-v-cb8f9a1a] {
display: flex; display: flex;
flex-direction: column; flex-direction: column;
border-radius: var(--p-button-border-radius); border-radius: var(--p-button-border-radius);
overflow: hidden; overflow: hidden;
border: 1px solid var(--p-panel-border-color); border: 1px solid var(--p-panel-border-color);
} }
.p-buttongroup-vertical .p-button[data-v-cf40dd39] { .p-buttongroup-vertical .p-button[data-v-cb8f9a1a] {
margin: 0; margin: 0;
border-radius: 0; border-radius: 0;
} }
@ -82,7 +84,7 @@
font-size: inherit; font-size: inherit;
} }
[data-v-5741c9ae] .highlight { [data-v-fd0a74bd] .highlight {
background-color: var(--p-primary-color); background-color: var(--p-primary-color);
color: var(--p-primary-contrast-color); color: var(--p-primary-contrast-color);
font-weight: bold; font-weight: bold;
@ -131,16 +133,7 @@
border-right: 4px solid var(--p-button-text-primary-color); border-right: 4px solid var(--p-button-text-primary-color);
} }
:root { .side-tool-bar-container[data-v-33cac83a] {
--sidebar-width: 64px;
--sidebar-icon-size: 1.5rem;
}
:root .small-sidebar {
--sidebar-width: 40px;
--sidebar-icon-size: 1rem;
}
.side-tool-bar-container[data-v-37d8d7b4] {
display: flex; display: flex;
flex-direction: column; flex-direction: column;
align-items: center; align-items: center;
@ -153,18 +146,91 @@
background-color: var(--comfy-menu-secondary-bg); background-color: var(--comfy-menu-secondary-bg);
color: var(--fg-color); color: var(--fg-color);
box-shadow: var(--bar-shadow); box-shadow: var(--bar-shadow);
--sidebar-width: 4rem;
--sidebar-icon-size: 1.5rem;
} }
.side-tool-bar-end[data-v-37d8d7b4] { .side-tool-bar-container.small-sidebar[data-v-33cac83a] {
--sidebar-width: 2.5rem;
--sidebar-icon-size: 1rem;
}
.side-tool-bar-end[data-v-33cac83a] {
align-self: flex-end; align-self: flex-end;
margin-top: auto; margin-top: auto;
} }
[data-v-b9328350] .p-inputtext { .status-indicator[data-v-8d011a31] {
position: absolute;
font-weight: 700;
font-size: 1.5rem;
top: 50%;
left: 50%;
transform: translate(-50%, -50%)
}
[data-v-54fadc45] .p-togglebutton {
position: relative;
flex-shrink: 0;
border-radius: 0px;
border-width: 0px;
border-right-width: 1px;
border-style: solid;
background-color: transparent;
padding: 0px;
border-right-color: var(--border-color)
}
[data-v-54fadc45] .p-togglebutton::before {
display: none
}
[data-v-54fadc45] .p-togglebutton:first-child {
border-left-width: 1px;
border-style: solid;
border-left-color: var(--border-color)
}
[data-v-54fadc45] .p-togglebutton:not(:first-child) {
border-left-width: 0px
}
[data-v-54fadc45] .p-togglebutton.p-togglebutton-checked {
height: 100%;
border-bottom-width: 1px;
border-style: solid;
border-bottom-color: var(--p-button-text-primary-color)
}
[data-v-54fadc45] .p-togglebutton:not(.p-togglebutton-checked) {
opacity: 0.75
}
[data-v-54fadc45] .p-togglebutton-checked .close-button,[data-v-54fadc45] .p-togglebutton:hover .close-button {
visibility: visible
}
[data-v-54fadc45] .p-togglebutton:hover .status-indicator {
display: none
}
[data-v-54fadc45] .p-togglebutton .close-button {
visibility: hidden
}
[data-v-54fadc45] .p-scrollpanel-content {
height: 100%
}
/* Scrollbar half opacity to avoid blocking the active tab bottom border */
[data-v-54fadc45] .p-scrollpanel:hover .p-scrollpanel-bar,[data-v-54fadc45] .p-scrollpanel:active .p-scrollpanel-bar {
opacity: 0.5
}
[data-v-54fadc45] .p-selectbutton {
height: 100%;
border-radius: 0px
}
[data-v-38831d8e] .workflow-tabs {
background-color: var(--comfy-menu-bg);
}
[data-v-26957f1f] .p-inputtext {
border-top-left-radius: 0; border-top-left-radius: 0;
border-bottom-left-radius: 0; border-bottom-left-radius: 0;
} }
.comfyui-queue-button[data-v-7f4f551b] .p-splitbutton-dropdown { .comfyui-queue-button[data-v-91a628af] .p-splitbutton-dropdown {
border-top-right-radius: 0; border-top-right-radius: 0;
border-bottom-right-radius: 0; border-bottom-right-radius: 0;
} }
@ -195,55 +261,23 @@
display: none; display: none;
} }
.top-menubar[data-v-6fecd137] .p-menubar-item-link svg { .top-menubar[data-v-56df69d2] .p-menubar-item-link svg {
display: none; display: none;
} }
[data-v-6fecd137] .p-menubar-submenu.dropdown-direction-up { [data-v-56df69d2] .p-menubar-submenu.dropdown-direction-up {
top: auto; top: auto;
bottom: 100%; bottom: 100%;
flex-direction: column-reverse; flex-direction: column-reverse;
} }
.keybinding-tag[data-v-6fecd137] { .keybinding-tag[data-v-56df69d2] {
background: var(--p-content-hover-background); background: var(--p-content-hover-background);
border-color: var(--p-content-border-color); border-color: var(--p-content-border-color);
border-style: solid; border-style: solid;
} }
.status-indicator[data-v-8d011a31] { .comfyui-menu[data-v-929e7543] {
position: absolute;
font-weight: 700;
font-size: 1.5rem;
top: 50%;
left: 50%;
transform: translate(-50%, -50%)
}
[data-v-d485c044] .p-togglebutton::before {
display: none
}
[data-v-d485c044] .p-togglebutton {
position: relative;
flex-shrink: 0;
border-radius: 0px;
background-color: transparent;
padding: 0px
}
[data-v-d485c044] .p-togglebutton.p-togglebutton-checked {
border-bottom-width: 2px;
border-bottom-color: var(--p-button-text-primary-color)
}
[data-v-d485c044] .p-togglebutton-checked .close-button,[data-v-d485c044] .p-togglebutton:hover .close-button {
visibility: visible
}
[data-v-d485c044] .p-togglebutton:hover .status-indicator {
display: none
}
[data-v-d485c044] .p-togglebutton .close-button {
visibility: hidden
}
.comfyui-menu[data-v-878b63b8] {
width: 100vw; width: 100vw;
height: var(--comfy-topbar-height);
background: var(--comfy-menu-bg); background: var(--comfy-menu-bg);
color: var(--fg-color); color: var(--fg-color);
box-shadow: var(--bar-shadow); box-shadow: var(--bar-shadow);
@ -253,18 +287,17 @@
z-index: 1000; z-index: 1000;
order: 0; order: 0;
grid-column: 1/-1; grid-column: 1/-1;
max-height: 90vh;
} }
.comfyui-menu.dropzone[data-v-878b63b8] { .comfyui-menu.dropzone[data-v-929e7543] {
background: var(--p-highlight-background); background: var(--p-highlight-background);
} }
.comfyui-menu.dropzone-active[data-v-878b63b8] { .comfyui-menu.dropzone-active[data-v-929e7543] {
background: var(--p-highlight-background-focus); background: var(--p-highlight-background-focus);
} }
[data-v-878b63b8] .p-menubar-item-label { [data-v-929e7543] .p-menubar-item-label {
line-height: revert; line-height: revert;
} }
.comfyui-logo[data-v-878b63b8] { .comfyui-logo[data-v-929e7543] {
font-size: 1.2em; font-size: 1.2em;
-webkit-user-select: none; -webkit-user-select: none;
-moz-user-select: none; -moz-user-select: none;

web/assets/GraphView-D9ZzDQZV.js (generated vendored Normal file, 4682 lines)

File diff suppressed because it is too large.

web/assets/InstallView-CAcYt0HL.js (generated vendored, 1288 lines)

File diff suppressed because one or more lines are too long

web/assets/InstallView-CVZcZZXJ.js (generated vendored Normal file, 945 lines)
View File

@ -0,0 +1,945 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, U as ref, bm as useModel, o as openBlock, f as createElementBlock, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, bn as script, bh as script$1, ar as withModifiers, z as withCtx, ab as script$2, K as useI18n, c as computed, ai as normalizeClass, B as createCommentVNode, a4 as script$3, a7 as createTextVNode, b5 as electronAPI, _ as _export_sfc, p as onMounted, r as resolveDirective, bg as script$4, i as withDirectives, bo as script$5, bp as script$6, l as script$7, y as createBlock, bj as script$8, bq as MigrationItems, w as watchEffect, F as Fragment, D as renderList, br as script$9, bs as mergeModels, bt as ValidationState, Y as normalizeI18nKey, O as watch, bu as checkMirrorReachable, bv as _sfc_main$7, bw as mergeValidationStates, bc as t, a$ as script$a, bx as CUDA_TORCH_URL, by as NIGHTLY_CPU_TORCH_URL, be as useRouter, ag as toRaw } from "./index-DqqhYDnY.js";
import { s as script$b, a as script$c, b as script$d, c as script$e, d as script$f } from "./index-BNlqgrYT.js";
import { P as PYTHON_MIRROR, a as PYPI_MIRROR } from "./uvMirrors-B-HKMf6X.js";
import { _ as _sfc_main$8 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1$5 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$5 = { class: "flex flex-col gap-4" };
const _hoisted_3$5 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$5 = { class: "text-neutral-400 my-0" };
const _hoisted_5$3 = { class: "flex flex-col bg-neutral-800 p-4 rounded-lg" };
const _hoisted_6$3 = { class: "flex items-center gap-4" };
const _hoisted_7$3 = { class: "flex-1" };
const _hoisted_8$3 = { class: "text-lg font-medium text-neutral-100" };
const _hoisted_9$3 = { class: "text-sm text-neutral-400 mt-1" };
const _hoisted_10$3 = { class: "flex items-center gap-4" };
const _hoisted_11$3 = { class: "flex-1" };
const _hoisted_12$3 = { class: "text-lg font-medium text-neutral-100" };
const _hoisted_13$1 = { class: "text-sm text-neutral-400 mt-1" };
const _hoisted_14$1 = { class: "text-neutral-300" };
const _hoisted_15 = { class: "font-medium mb-2" };
const _hoisted_16 = { class: "list-disc pl-6 space-y-1" };
const _hoisted_17 = { class: "font-medium mt-4 mb-2" };
const _hoisted_18 = { class: "list-disc pl-6 space-y-1" };
const _hoisted_19 = { class: "mt-4" };
const _hoisted_20 = {
href: "https://comfy.org/privacy",
target: "_blank",
class: "text-blue-400 hover:text-blue-300 underline"
};
const _sfc_main$6 = /* @__PURE__ */ defineComponent({
__name: "DesktopSettingsConfiguration",
props: {
"autoUpdate": { type: Boolean, ...{ required: true } },
"autoUpdateModifiers": {},
"allowMetrics": { type: Boolean, ...{ required: true } },
"allowMetricsModifiers": {}
},
emits: ["update:autoUpdate", "update:allowMetrics"],
setup(__props) {
const showDialog = ref(false);
const autoUpdate = useModel(__props, "autoUpdate");
const allowMetrics = useModel(__props, "allowMetrics");
const showMetricsInfo = /* @__PURE__ */ __name(() => {
showDialog.value = true;
}, "showMetricsInfo");
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", _hoisted_1$5, [
createBaseVNode("div", _hoisted_2$5, [
createBaseVNode("h2", _hoisted_3$5, toDisplayString(_ctx.$t("install.desktopAppSettings")), 1),
createBaseVNode("p", _hoisted_4$5, toDisplayString(_ctx.$t("install.desktopAppSettingsDescription")), 1)
]),
createBaseVNode("div", _hoisted_5$3, [
createBaseVNode("div", _hoisted_6$3, [
createBaseVNode("div", _hoisted_7$3, [
createBaseVNode("h3", _hoisted_8$3, toDisplayString(_ctx.$t("install.settings.autoUpdate")), 1),
createBaseVNode("p", _hoisted_9$3, toDisplayString(_ctx.$t("install.settings.autoUpdateDescription")), 1)
]),
createVNode(unref(script), {
modelValue: autoUpdate.value,
"onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => autoUpdate.value = $event)
}, null, 8, ["modelValue"])
]),
createVNode(unref(script$1)),
createBaseVNode("div", _hoisted_10$3, [
createBaseVNode("div", _hoisted_11$3, [
createBaseVNode("h3", _hoisted_12$3, toDisplayString(_ctx.$t("install.settings.allowMetrics")), 1),
createBaseVNode("p", _hoisted_13$1, toDisplayString(_ctx.$t("install.settings.allowMetricsDescription")), 1),
createBaseVNode("a", {
href: "#",
class: "text-sm text-blue-400 hover:text-blue-300 mt-1 inline-block",
onClick: withModifiers(showMetricsInfo, ["prevent"])
}, toDisplayString(_ctx.$t("install.settings.learnMoreAboutData")), 1)
]),
createVNode(unref(script), {
modelValue: allowMetrics.value,
"onUpdate:modelValue": _cache[1] || (_cache[1] = ($event) => allowMetrics.value = $event)
}, null, 8, ["modelValue"])
])
]),
createVNode(unref(script$2), {
visible: showDialog.value,
"onUpdate:visible": _cache[2] || (_cache[2] = ($event) => showDialog.value = $event),
modal: "",
header: _ctx.$t("install.settings.dataCollectionDialog.title")
}, {
default: withCtx(() => [
createBaseVNode("div", _hoisted_14$1, [
createBaseVNode("h4", _hoisted_15, toDisplayString(_ctx.$t("install.settings.dataCollectionDialog.whatWeCollect")), 1),
createBaseVNode("ul", _hoisted_16, [
createBaseVNode("li", null, toDisplayString(_ctx.$t("install.settings.dataCollectionDialog.collect.errorReports")), 1),
createBaseVNode("li", null, toDisplayString(_ctx.$t("install.settings.dataCollectionDialog.collect.systemInfo")), 1),
createBaseVNode("li", null, toDisplayString(_ctx.$t(
"install.settings.dataCollectionDialog.collect.userJourneyEvents"
)), 1)
]),
createBaseVNode("h4", _hoisted_17, toDisplayString(_ctx.$t("install.settings.dataCollectionDialog.whatWeDoNotCollect")), 1),
createBaseVNode("ul", _hoisted_18, [
createBaseVNode("li", null, toDisplayString(_ctx.$t(
"install.settings.dataCollectionDialog.doNotCollect.personalInformation"
)), 1),
createBaseVNode("li", null, toDisplayString(_ctx.$t(
"install.settings.dataCollectionDialog.doNotCollect.workflowContents"
)), 1),
createBaseVNode("li", null, toDisplayString(_ctx.$t(
"install.settings.dataCollectionDialog.doNotCollect.fileSystemInformation"
)), 1),
createBaseVNode("li", null, toDisplayString(_ctx.$t(
"install.settings.dataCollectionDialog.doNotCollect.customNodeConfigurations"
)), 1)
]),
createBaseVNode("div", _hoisted_19, [
createBaseVNode("a", _hoisted_20, toDisplayString(_ctx.$t("install.settings.dataCollectionDialog.viewFullPolicy")), 1)
])
])
]),
_: 1
}, 8, ["visible", "header"])
]);
};
}
});
const _imports_0 = "" + new URL("images/nvidia-logo.svg", import.meta.url).href;
const _imports_1 = "" + new URL("images/apple-mps-logo.png", import.meta.url).href;
const _imports_2 = "" + new URL("images/manual-configuration.svg", import.meta.url).href;
const _hoisted_1$4 = { class: "flex flex-col gap-6 w-[600px] h-[30rem] select-none" };
const _hoisted_2$4 = { class: "grow flex flex-col gap-4 text-neutral-300" };
const _hoisted_3$4 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$4 = { class: "m-1 text-neutral-400" };
const _hoisted_5$2 = {
key: 0,
class: "m-1"
};
const _hoisted_6$2 = {
key: 1,
class: "m-1"
};
const _hoisted_7$2 = {
key: 2,
class: "text-neutral-300"
};
const _hoisted_8$2 = { class: "m-1" };
const _hoisted_9$2 = { key: 3 };
const _hoisted_10$2 = { class: "m-1" };
const _hoisted_11$2 = { class: "m-1" };
const _hoisted_12$2 = {
for: "cpu-mode",
class: "select-none"
};
const _sfc_main$5 = /* @__PURE__ */ defineComponent({
__name: "GpuPicker",
props: {
"device": {
required: true
},
"deviceModifiers": {}
},
emits: ["update:device"],
setup(__props) {
const { t: t2 } = useI18n();
const cpuMode = computed({
get: /* @__PURE__ */ __name(() => selected.value === "cpu", "get"),
set: /* @__PURE__ */ __name((value) => {
selected.value = value ? "cpu" : null;
}, "set")
});
const selected = useModel(__props, "device");
const electron = electronAPI();
const platform = electron.getPlatform();
const pickGpu = /* @__PURE__ */ __name((value) => {
const newValue = selected.value === value ? null : value;
selected.value = newValue;
}, "pickGpu");
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", _hoisted_1$4, [
createBaseVNode("div", _hoisted_2$4, [
createBaseVNode("h2", _hoisted_3$4, toDisplayString(_ctx.$t("install.gpuSelection.selectGpu")), 1),
createBaseVNode("p", _hoisted_4$4, toDisplayString(_ctx.$t("install.gpuSelection.selectGpuDescription")) + ": ", 1),
createBaseVNode("div", {
class: normalizeClass(["flex gap-2 text-center transition-opacity", { selected: selected.value }])
}, [
unref(platform) !== "darwin" ? (openBlock(), createElementBlock("div", {
key: 0,
class: normalizeClass(["gpu-button", { selected: selected.value === "nvidia" }]),
role: "button",
onClick: _cache[0] || (_cache[0] = ($event) => pickGpu("nvidia"))
}, _cache[4] || (_cache[4] = [
createBaseVNode("img", {
class: "m-12",
alt: "NVIDIA logo",
width: "196",
height: "32",
src: _imports_0
}, null, -1)
]), 2)) : createCommentVNode("", true),
unref(platform) === "darwin" ? (openBlock(), createElementBlock("div", {
key: 1,
class: normalizeClass(["gpu-button", { selected: selected.value === "mps" }]),
role: "button",
onClick: _cache[1] || (_cache[1] = ($event) => pickGpu("mps"))
}, _cache[5] || (_cache[5] = [
createBaseVNode("img", {
class: "rounded-lg hover-brighten",
alt: "Apple Metal Performance Shaders Logo",
width: "292",
ratio: "",
src: _imports_1
}, null, -1)
]), 2)) : createCommentVNode("", true),
createBaseVNode("div", {
class: normalizeClass(["gpu-button", { selected: selected.value === "unsupported" }]),
role: "button",
onClick: _cache[2] || (_cache[2] = ($event) => pickGpu("unsupported"))
}, _cache[6] || (_cache[6] = [
createBaseVNode("img", {
class: "m-12",
alt: "Manual configuration",
width: "196",
src: _imports_2
}, null, -1)
]), 2)
], 2),
selected.value === "nvidia" ? (openBlock(), createElementBlock("p", _hoisted_5$2, [
createVNode(unref(script$3), {
icon: "pi pi-check",
severity: "success",
value: "CUDA"
}),
createTextVNode(" " + toDisplayString(_ctx.$t("install.gpuSelection.nvidiaDescription")), 1)
])) : createCommentVNode("", true),
selected.value === "mps" ? (openBlock(), createElementBlock("p", _hoisted_6$2, [
createVNode(unref(script$3), {
icon: "pi pi-check",
severity: "success",
value: "MPS"
}),
createTextVNode(" " + toDisplayString(_ctx.$t("install.gpuSelection.mpsDescription")), 1)
])) : createCommentVNode("", true),
selected.value === "unsupported" ? (openBlock(), createElementBlock("div", _hoisted_7$2, [
createBaseVNode("p", _hoisted_8$2, [
createVNode(unref(script$3), {
icon: "pi pi-exclamation-triangle",
severity: "warn",
value: unref(t2)("icon.exclamation-triangle")
}, null, 8, ["value"]),
createTextVNode(" " + toDisplayString(_ctx.$t("install.gpuSelection.customSkipsPython")), 1)
]),
createBaseVNode("ul", null, [
createBaseVNode("li", null, [
createBaseVNode("strong", null, toDisplayString(_ctx.$t("install.gpuSelection.customComfyNeedsPython")), 1)
]),
createBaseVNode("li", null, toDisplayString(_ctx.$t("install.gpuSelection.customManualVenv")), 1),
createBaseVNode("li", null, toDisplayString(_ctx.$t("install.gpuSelection.customInstallRequirements")), 1),
createBaseVNode("li", null, toDisplayString(_ctx.$t("install.gpuSelection.customMayNotWork")), 1)
])
])) : createCommentVNode("", true),
selected.value === "cpu" ? (openBlock(), createElementBlock("div", _hoisted_9$2, [
createBaseVNode("p", _hoisted_10$2, [
createVNode(unref(script$3), {
icon: "pi pi-exclamation-triangle",
severity: "warn",
value: unref(t2)("icon.exclamation-triangle")
}, null, 8, ["value"]),
createTextVNode(" " + toDisplayString(_ctx.$t("install.gpuSelection.cpuModeDescription")), 1)
]),
createBaseVNode("p", _hoisted_11$2, toDisplayString(_ctx.$t("install.gpuSelection.cpuModeDescription2")), 1)
])) : createCommentVNode("", true)
]),
createBaseVNode("div", {
class: normalizeClass(["transition-opacity flex gap-3 h-0", {
"opacity-40": selected.value && selected.value !== "cpu"
}])
}, [
createVNode(unref(script), {
modelValue: cpuMode.value,
"onUpdate:modelValue": _cache[3] || (_cache[3] = ($event) => cpuMode.value = $event),
inputId: "cpu-mode",
class: "-translate-y-40"
}, null, 8, ["modelValue"]),
createBaseVNode("label", _hoisted_12$2, toDisplayString(_ctx.$t("install.gpuSelection.enableCpuMode")), 1)
], 2)
]);
};
}
});
const GpuPicker = /* @__PURE__ */ _export_sfc(_sfc_main$5, [["__scopeId", "data-v-79125ff6"]]);
const _hoisted_1$3 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$3 = { class: "flex flex-col gap-4" };
const _hoisted_3$3 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$3 = { class: "text-neutral-400 my-0" };
const _hoisted_5$1 = { class: "flex gap-2" };
const _hoisted_6$1 = { class: "bg-neutral-800 p-4 rounded-lg" };
const _hoisted_7$1 = { class: "text-lg font-medium mt-0 mb-3 text-neutral-100" };
const _hoisted_8$1 = { class: "flex flex-col gap-2" };
const _hoisted_9$1 = { class: "flex items-center gap-2" };
const _hoisted_10$1 = { class: "text-neutral-200" };
const _hoisted_11$1 = { class: "pi pi-info-circle" };
const _hoisted_12$1 = { class: "flex items-center gap-2" };
const _hoisted_13 = { class: "text-neutral-200" };
const _hoisted_14 = { class: "pi pi-info-circle" };
const _sfc_main$4 = /* @__PURE__ */ defineComponent({
__name: "InstallLocationPicker",
props: {
"installPath": { required: true },
"installPathModifiers": {},
"pathError": { required: true },
"pathErrorModifiers": {}
},
emits: ["update:installPath", "update:pathError"],
setup(__props) {
const { t: t2 } = useI18n();
const installPath = useModel(__props, "installPath");
const pathError = useModel(__props, "pathError");
const pathExists = ref(false);
const appData = ref("");
const appPath = ref("");
const electron = electronAPI();
onMounted(async () => {
const paths = await electron.getSystemPaths();
appData.value = paths.appData;
appPath.value = paths.appPath;
installPath.value = paths.defaultInstallPath;
await validatePath(paths.defaultInstallPath);
});
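    // Ask the desktop app to validate the install path and collect all problems (write access, free space, missing parent, unexpected errors) into one newline-separated message; an already existing directory only raises a warning flag.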
const validatePath = /* @__PURE__ */ __name(async (path) => {
try {
pathError.value = "";
pathExists.value = false;
const validation = await electron.validateInstallPath(path);
if (!validation.isValid) {
const errors = [];
if (validation.cannotWrite) errors.push(t2("install.cannotWrite"));
if (validation.freeSpace < validation.requiredSpace) {
const requiredGB = validation.requiredSpace / 1024 / 1024 / 1024;
errors.push(`${t2("install.insufficientFreeSpace")}: ${requiredGB} GB`);
}
if (validation.parentMissing) errors.push(t2("install.parentMissing"));
if (validation.error)
errors.push(`${t2("install.unhandledError")}: ${validation.error}`);
pathError.value = errors.join("\n");
}
if (validation.exists) pathExists.value = true;
} catch (error) {
pathError.value = t2("install.pathValidationFailed");
}
}, "validatePath");
const browsePath = /* @__PURE__ */ __name(async () => {
try {
const result = await electron.showDirectoryPicker();
if (result) {
installPath.value = result;
await validatePath(result);
}
} catch (error) {
pathError.value = t2("install.failedToSelectDirectory");
}
}, "browsePath");
return (_ctx, _cache) => {
const _directive_tooltip = resolveDirective("tooltip");
return openBlock(), createElementBlock("div", _hoisted_1$3, [
createBaseVNode("div", _hoisted_2$3, [
createBaseVNode("h2", _hoisted_3$3, toDisplayString(_ctx.$t("install.chooseInstallationLocation")), 1),
createBaseVNode("p", _hoisted_4$3, toDisplayString(_ctx.$t("install.installLocationDescription")), 1),
createBaseVNode("div", _hoisted_5$1, [
createVNode(unref(script$6), { class: "flex-1" }, {
default: withCtx(() => [
createVNode(unref(script$4), {
modelValue: installPath.value,
"onUpdate:modelValue": [
_cache[0] || (_cache[0] = ($event) => installPath.value = $event),
validatePath
],
class: normalizeClass(["w-full", { "p-invalid": pathError.value }])
}, null, 8, ["modelValue", "class"]),
withDirectives(createVNode(unref(script$5), { class: "pi pi-info-circle" }, null, 512), [
[_directive_tooltip, _ctx.$t("install.installLocationTooltip")]
])
]),
_: 1
}),
createVNode(unref(script$7), {
icon: "pi pi-folder",
onClick: browsePath,
class: "w-12"
})
]),
pathError.value ? (openBlock(), createBlock(unref(script$8), {
key: 0,
severity: "error",
class: "whitespace-pre-line"
}, {
default: withCtx(() => [
createTextVNode(toDisplayString(pathError.value), 1)
]),
_: 1
})) : createCommentVNode("", true),
pathExists.value ? (openBlock(), createBlock(unref(script$8), {
key: 1,
severity: "warn"
}, {
default: withCtx(() => [
createTextVNode(toDisplayString(_ctx.$t("install.pathExists")), 1)
]),
_: 1
})) : createCommentVNode("", true)
]),
createBaseVNode("div", _hoisted_6$1, [
createBaseVNode("h3", _hoisted_7$1, toDisplayString(_ctx.$t("install.systemLocations")), 1),
createBaseVNode("div", _hoisted_8$1, [
createBaseVNode("div", _hoisted_9$1, [
_cache[1] || (_cache[1] = createBaseVNode("i", { class: "pi pi-folder text-neutral-400" }, null, -1)),
_cache[2] || (_cache[2] = createBaseVNode("span", { class: "text-neutral-400" }, "App Data:", -1)),
createBaseVNode("span", _hoisted_10$1, toDisplayString(appData.value), 1),
withDirectives(createBaseVNode("span", _hoisted_11$1, null, 512), [
[_directive_tooltip, _ctx.$t("install.appDataLocationTooltip")]
])
]),
createBaseVNode("div", _hoisted_12$1, [
_cache[3] || (_cache[3] = createBaseVNode("i", { class: "pi pi-desktop text-neutral-400" }, null, -1)),
_cache[4] || (_cache[4] = createBaseVNode("span", { class: "text-neutral-400" }, "App Path:", -1)),
createBaseVNode("span", _hoisted_13, toDisplayString(appPath.value), 1),
withDirectives(createBaseVNode("span", _hoisted_14, null, 512), [
[_directive_tooltip, _ctx.$t("install.appPathLocationTooltip")]
])
])
])
])
]);
};
}
});
const _hoisted_1$2 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$2 = { class: "flex flex-col gap-4" };
const _hoisted_3$2 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$2 = { class: "text-neutral-400 my-0" };
const _hoisted_5 = { class: "flex gap-2" };
const _hoisted_6 = {
key: 0,
class: "flex flex-col gap-4 bg-neutral-800 p-4 rounded-lg"
};
const _hoisted_7 = { class: "text-lg mt-0 font-medium text-neutral-100" };
const _hoisted_8 = { class: "flex flex-col gap-3" };
const _hoisted_9 = ["onClick"];
const _hoisted_10 = ["for"];
const _hoisted_11 = { class: "text-sm text-neutral-400 my-1" };
const _hoisted_12 = {
key: 1,
class: "text-neutral-400 italic"
};
const _sfc_main$3 = /* @__PURE__ */ defineComponent({
__name: "MigrationPicker",
props: {
"sourcePath": { required: false },
"sourcePathModifiers": {},
"migrationItemIds": {
required: false
},
"migrationItemIdsModifiers": {}
},
emits: ["update:sourcePath", "update:migrationItemIds"],
setup(__props) {
const { t: t2 } = useI18n();
const electron = electronAPI();
const sourcePath = useModel(__props, "sourcePath");
const migrationItemIds = useModel(__props, "migrationItemIds");
const migrationItems = ref(
MigrationItems.map((item) => ({
...item,
selected: true
}))
);
const pathError = ref("");
const isValidSource = computed(
() => sourcePath.value !== "" && pathError.value === ""
);
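    // An empty source path means "no migration"; otherwise let the desktop app check that it points at a usable ComfyUI installation.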
const validateSource = /* @__PURE__ */ __name(async (sourcePath2) => {
if (!sourcePath2) {
pathError.value = "";
return;
}
try {
pathError.value = "";
const validation = await electron.validateComfyUISource(sourcePath2);
if (!validation.isValid) pathError.value = validation.error;
} catch (error) {
console.error(error);
pathError.value = t2("install.pathValidationFailed");
}
}, "validateSource");
const browsePath = /* @__PURE__ */ __name(async () => {
try {
const result = await electron.showDirectoryPicker();
if (result) {
sourcePath.value = result;
await validateSource(result);
}
} catch (error) {
console.error(error);
pathError.value = t2("install.failedToSelectDirectory");
}
}, "browsePath");
watchEffect(() => {
migrationItemIds.value = migrationItems.value.filter((item) => item.selected).map((item) => item.id);
});
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", _hoisted_1$2, [
createBaseVNode("div", _hoisted_2$2, [
createBaseVNode("h2", _hoisted_3$2, toDisplayString(_ctx.$t("install.migrateFromExistingInstallation")), 1),
createBaseVNode("p", _hoisted_4$2, toDisplayString(_ctx.$t("install.migrationSourcePathDescription")), 1),
createBaseVNode("div", _hoisted_5, [
createVNode(unref(script$4), {
modelValue: sourcePath.value,
"onUpdate:modelValue": [
_cache[0] || (_cache[0] = ($event) => sourcePath.value = $event),
validateSource
],
placeholder: "Select existing ComfyUI installation (optional)",
class: normalizeClass(["flex-1", { "p-invalid": pathError.value }])
}, null, 8, ["modelValue", "class"]),
createVNode(unref(script$7), {
icon: "pi pi-folder",
onClick: browsePath,
class: "w-12"
})
]),
pathError.value ? (openBlock(), createBlock(unref(script$8), {
key: 0,
severity: "error"
}, {
default: withCtx(() => [
createTextVNode(toDisplayString(pathError.value), 1)
]),
_: 1
})) : createCommentVNode("", true)
]),
isValidSource.value ? (openBlock(), createElementBlock("div", _hoisted_6, [
createBaseVNode("h3", _hoisted_7, toDisplayString(_ctx.$t("install.selectItemsToMigrate")), 1),
createBaseVNode("div", _hoisted_8, [
(openBlock(true), createElementBlock(Fragment, null, renderList(migrationItems.value, (item) => {
return openBlock(), createElementBlock("div", {
key: item.id,
class: "flex items-center gap-3 p-2 hover:bg-neutral-700 rounded",
onClick: /* @__PURE__ */ __name(($event) => item.selected = !item.selected, "onClick")
}, [
createVNode(unref(script$9), {
modelValue: item.selected,
"onUpdate:modelValue": /* @__PURE__ */ __name(($event) => item.selected = $event, "onUpdate:modelValue"),
inputId: item.id,
binary: true,
onClick: _cache[1] || (_cache[1] = withModifiers(() => {
}, ["stop"]))
}, null, 8, ["modelValue", "onUpdate:modelValue", "inputId"]),
createBaseVNode("div", null, [
createBaseVNode("label", {
for: item.id,
class: "text-neutral-200 font-medium"
}, toDisplayString(item.label), 9, _hoisted_10),
createBaseVNode("p", _hoisted_11, toDisplayString(item.description), 1)
])
], 8, _hoisted_9);
}), 128))
])
])) : (openBlock(), createElementBlock("div", _hoisted_12, toDisplayString(_ctx.$t("install.migrationOptional")), 1))
]);
};
}
});
const _hoisted_1$1 = { class: "flex flex-col items-center gap-4" };
const _hoisted_2$1 = { class: "w-full" };
const _hoisted_3$1 = { class: "text-lg font-medium text-neutral-100" };
const _hoisted_4$1 = { class: "text-sm text-neutral-400 mt-1" };
const _sfc_main$2 = /* @__PURE__ */ defineComponent({
__name: "MirrorItem",
props: /* @__PURE__ */ mergeModels({
item: {}
}, {
"modelValue": { required: true },
"modelModifiers": {}
}),
emits: /* @__PURE__ */ mergeModels(["state-change"], ["update:modelValue"]),
setup(__props, { emit: __emit }) {
const emit = __emit;
const modelValue = useModel(__props, "modelValue");
const validationState = ref(ValidationState.IDLE);
const normalizedSettingId = computed(() => {
return normalizeI18nKey(__props.item.settingId);
});
onMounted(() => {
modelValue.value = __props.item.mirror;
});
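    // If the preferred mirror fails validation, fall back to the item's fallbackMirror (unless the user already edited the value).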
watch(validationState, (newState) => {
emit("state-change", newState);
if (newState === ValidationState.INVALID && modelValue.value === __props.item.mirror) {
modelValue.value = __props.item.fallbackMirror;
}
});
return (_ctx, _cache) => {
const _component_UrlInput = _sfc_main$7;
return openBlock(), createElementBlock("div", _hoisted_1$1, [
createBaseVNode("div", _hoisted_2$1, [
createBaseVNode("h3", _hoisted_3$1, toDisplayString(_ctx.$t(`settings.${normalizedSettingId.value}.name`)), 1),
createBaseVNode("p", _hoisted_4$1, toDisplayString(_ctx.$t(`settings.${normalizedSettingId.value}.tooltip`)), 1)
]),
createVNode(_component_UrlInput, {
modelValue: modelValue.value,
"onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => modelValue.value = $event),
"validate-url-fn": /* @__PURE__ */ __name((mirror) => unref(checkMirrorReachable)(mirror + (_ctx.item.validationPathSuffix ?? "")), "validate-url-fn"),
onStateChange: _cache[1] || (_cache[1] = ($event) => validationState.value = $event)
}, null, 8, ["modelValue", "validate-url-fn"])
]);
};
}
});
const _sfc_main$1 = /* @__PURE__ */ defineComponent({
__name: "MirrorsConfiguration",
props: /* @__PURE__ */ mergeModels({
device: {}
}, {
"pythonMirror": { required: true },
"pythonMirrorModifiers": {},
"pypiMirror": { required: true },
"pypiMirrorModifiers": {},
"torchMirror": { required: true },
"torchMirrorModifiers": {}
}),
emits: ["update:pythonMirror", "update:pypiMirror", "update:torchMirror"],
setup(__props) {
const showMirrorInputs = ref(false);
const pythonMirror = useModel(__props, "pythonMirror");
const pypiMirror = useModel(__props, "pypiMirror");
const torchMirror = useModel(__props, "torchMirror");
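    // Choose the torch install mirror for the selected device: nightly CPU wheels for Apple MPS, the CUDA index for NVIDIA, and the plain PyPI mirror for CPU-only installs.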
const getTorchMirrorItem = /* @__PURE__ */ __name((device) => {
const settingId = "Comfy-Desktop.UV.TorchInstallMirror";
switch (device) {
case "mps":
return {
settingId,
mirror: NIGHTLY_CPU_TORCH_URL,
fallbackMirror: NIGHTLY_CPU_TORCH_URL
};
case "nvidia":
return {
settingId,
mirror: CUDA_TORCH_URL,
fallbackMirror: CUDA_TORCH_URL
};
case "cpu":
default:
return {
settingId,
mirror: PYPI_MIRROR.mirror,
fallbackMirror: PYPI_MIRROR.fallbackMirror
};
}
}, "getTorchMirrorItem");
const mirrors = computed(() => [
[PYTHON_MIRROR, pythonMirror],
[PYPI_MIRROR, pypiMirror],
[getTorchMirrorItem(__props.device), torchMirror]
]);
const validationStates = ref(
mirrors.value.map(() => ValidationState.IDLE)
);
const validationState = computed(() => {
return mergeValidationStates(validationStates.value);
});
const validationStateTooltip = computed(() => {
switch (validationState.value) {
case ValidationState.INVALID:
return t("install.settings.mirrorsUnreachable");
case ValidationState.VALID:
return t("install.settings.mirrorsReachable");
default:
return t("install.settings.checkingMirrors");
}
});
return (_ctx, _cache) => {
const _directive_tooltip = resolveDirective("tooltip");
return openBlock(), createBlock(unref(script$a), {
header: _ctx.$t("install.settings.mirrorSettings"),
toggleable: "",
collapsed: !showMirrorInputs.value,
"pt:root": "bg-neutral-800 border-none w-[600px]"
}, {
icons: withCtx(() => [
withDirectives(createBaseVNode("i", {
class: normalizeClass({
"pi pi-spin pi-spinner text-neutral-400": validationState.value === unref(ValidationState).LOADING,
"pi pi-check text-green-500": validationState.value === unref(ValidationState).VALID,
"pi pi-times text-red-500": validationState.value === unref(ValidationState).INVALID
})
}, null, 2), [
[_directive_tooltip, validationStateTooltip.value]
])
]),
default: withCtx(() => [
(openBlock(true), createElementBlock(Fragment, null, renderList(mirrors.value, ([item, modelValue], index) => {
return openBlock(), createElementBlock(Fragment, {
key: item.settingId + item.mirror
}, [
index > 0 ? (openBlock(), createBlock(unref(script$1), { key: 0 })) : createCommentVNode("", true),
createVNode(_sfc_main$2, {
item,
modelValue: modelValue.value,
"onUpdate:modelValue": /* @__PURE__ */ __name(($event) => modelValue.value = $event, "onUpdate:modelValue"),
onStateChange: /* @__PURE__ */ __name(($event) => validationStates.value[index] = $event, "onStateChange")
}, null, 8, ["item", "modelValue", "onUpdate:modelValue", "onStateChange"])
], 64);
}), 128))
]),
_: 1
}, 8, ["header", "collapsed"]);
};
}
});
const _hoisted_1 = { class: "flex pt-6 justify-end" };
const _hoisted_2 = { class: "flex pt-6 justify-between" };
const _hoisted_3 = { class: "flex pt-6 justify-between" };
const _hoisted_4 = { class: "flex mt-6 justify-between" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "InstallView",
setup(__props) {
const device = ref(null);
const installPath = ref("");
const pathError = ref("");
const migrationSourcePath = ref("");
const migrationItemIds = ref([]);
const autoUpdate = ref(true);
const allowMetrics = ref(true);
const pythonMirror = ref("");
const pypiMirror = ref("");
const torchMirror = ref("");
const highestStep = ref(0);
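    // Remember the furthest step the user has reached so later stepper tabs stay disabled until visited; each change is also reported as a telemetry event.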
const handleStepChange = /* @__PURE__ */ __name((value) => {
setHighestStep(value);
electronAPI().Events.trackEvent("install_stepper_change", {
step: value
});
}, "handleStepChange");
const setHighestStep = /* @__PURE__ */ __name((value) => {
const int = typeof value === "number" ? value : parseInt(value, 10);
if (!isNaN(int) && int > highestStep.value) highestStep.value = int;
}, "setHighestStep");
const hasError = computed(() => pathError.value !== "");
const noGpu = computed(() => typeof device.value !== "string");
const electron = electronAPI();
const router = useRouter();
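    // Collect every install option, hand it to the desktop app, then route to manual configuration for unsupported devices or straight to the server-start screen.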
const install = /* @__PURE__ */ __name(() => {
const options = {
installPath: installPath.value,
autoUpdate: autoUpdate.value,
allowMetrics: allowMetrics.value,
migrationSourcePath: migrationSourcePath.value,
migrationItemIds: toRaw(migrationItemIds.value),
pythonMirror: pythonMirror.value,
pypiMirror: pypiMirror.value,
torchMirror: torchMirror.value,
device: device.value
};
electron.installComfyUI(options);
const nextPage = options.device === "unsupported" ? "/manual-configuration" : "/server-start";
router.push(nextPage);
}, "install");
onMounted(async () => {
if (!electron) return;
const detectedGpu = await electron.Config.getDetectedGpu();
if (detectedGpu === "mps" || detectedGpu === "nvidia") {
device.value = detectedGpu;
}
electronAPI().Events.trackEvent("install_stepper_change", {
step: "0",
gpu: detectedGpu
});
});
return (_ctx, _cache) => {
return openBlock(), createBlock(_sfc_main$8, { dark: "" }, {
default: withCtx(() => [
createVNode(unref(script$f), {
class: "h-full p-8 2xl:p-16",
value: "0",
"onUpdate:value": handleStepChange
}, {
default: withCtx(() => [
createVNode(unref(script$b), { class: "select-none" }, {
default: withCtx(() => [
createVNode(unref(script$c), { value: "0" }, {
default: withCtx(() => [
createTextVNode(toDisplayString(_ctx.$t("install.gpu")), 1)
]),
_: 1
}),
createVNode(unref(script$c), {
value: "1",
disabled: noGpu.value
}, {
default: withCtx(() => [
createTextVNode(toDisplayString(_ctx.$t("install.installLocation")), 1)
]),
_: 1
}, 8, ["disabled"]),
createVNode(unref(script$c), {
value: "2",
disabled: noGpu.value || hasError.value || highestStep.value < 1
}, {
default: withCtx(() => [
createTextVNode(toDisplayString(_ctx.$t("install.migration")), 1)
]),
_: 1
}, 8, ["disabled"]),
createVNode(unref(script$c), {
value: "3",
disabled: noGpu.value || hasError.value || highestStep.value < 2
}, {
default: withCtx(() => [
createTextVNode(toDisplayString(_ctx.$t("install.desktopSettings")), 1)
]),
_: 1
}, 8, ["disabled"])
]),
_: 1
}),
createVNode(unref(script$d), null, {
default: withCtx(() => [
createVNode(unref(script$e), { value: "0" }, {
default: withCtx(({ activateCallback }) => [
createVNode(GpuPicker, {
device: device.value,
"onUpdate:device": _cache[0] || (_cache[0] = ($event) => device.value = $event)
}, null, 8, ["device"]),
createBaseVNode("div", _hoisted_1, [
createVNode(unref(script$7), {
label: _ctx.$t("g.next"),
icon: "pi pi-arrow-right",
iconPos: "right",
onClick: /* @__PURE__ */ __name(($event) => activateCallback("1"), "onClick"),
disabled: typeof device.value !== "string"
}, null, 8, ["label", "onClick", "disabled"])
])
]),
_: 1
}),
createVNode(unref(script$e), { value: "1" }, {
default: withCtx(({ activateCallback }) => [
createVNode(_sfc_main$4, {
installPath: installPath.value,
"onUpdate:installPath": _cache[1] || (_cache[1] = ($event) => installPath.value = $event),
pathError: pathError.value,
"onUpdate:pathError": _cache[2] || (_cache[2] = ($event) => pathError.value = $event)
}, null, 8, ["installPath", "pathError"]),
createBaseVNode("div", _hoisted_2, [
createVNode(unref(script$7), {
label: _ctx.$t("g.back"),
severity: "secondary",
icon: "pi pi-arrow-left",
onClick: /* @__PURE__ */ __name(($event) => activateCallback("0"), "onClick")
}, null, 8, ["label", "onClick"]),
createVNode(unref(script$7), {
label: _ctx.$t("g.next"),
icon: "pi pi-arrow-right",
iconPos: "right",
onClick: /* @__PURE__ */ __name(($event) => activateCallback("2"), "onClick"),
disabled: pathError.value !== ""
}, null, 8, ["label", "onClick", "disabled"])
])
]),
_: 1
}),
createVNode(unref(script$e), { value: "2" }, {
default: withCtx(({ activateCallback }) => [
createVNode(_sfc_main$3, {
sourcePath: migrationSourcePath.value,
"onUpdate:sourcePath": _cache[3] || (_cache[3] = ($event) => migrationSourcePath.value = $event),
migrationItemIds: migrationItemIds.value,
"onUpdate:migrationItemIds": _cache[4] || (_cache[4] = ($event) => migrationItemIds.value = $event)
}, null, 8, ["sourcePath", "migrationItemIds"]),
createBaseVNode("div", _hoisted_3, [
createVNode(unref(script$7), {
label: _ctx.$t("g.back"),
severity: "secondary",
icon: "pi pi-arrow-left",
onClick: /* @__PURE__ */ __name(($event) => activateCallback("1"), "onClick")
}, null, 8, ["label", "onClick"]),
createVNode(unref(script$7), {
label: _ctx.$t("g.next"),
icon: "pi pi-arrow-right",
iconPos: "right",
onClick: /* @__PURE__ */ __name(($event) => activateCallback("3"), "onClick")
}, null, 8, ["label", "onClick"])
])
]),
_: 1
}),
createVNode(unref(script$e), { value: "3" }, {
default: withCtx(({ activateCallback }) => [
createVNode(_sfc_main$6, {
autoUpdate: autoUpdate.value,
"onUpdate:autoUpdate": _cache[5] || (_cache[5] = ($event) => autoUpdate.value = $event),
allowMetrics: allowMetrics.value,
"onUpdate:allowMetrics": _cache[6] || (_cache[6] = ($event) => allowMetrics.value = $event)
}, null, 8, ["autoUpdate", "allowMetrics"]),
createVNode(_sfc_main$1, {
device: device.value,
pythonMirror: pythonMirror.value,
"onUpdate:pythonMirror": _cache[7] || (_cache[7] = ($event) => pythonMirror.value = $event),
pypiMirror: pypiMirror.value,
"onUpdate:pypiMirror": _cache[8] || (_cache[8] = ($event) => pypiMirror.value = $event),
torchMirror: torchMirror.value,
"onUpdate:torchMirror": _cache[9] || (_cache[9] = ($event) => torchMirror.value = $event),
class: "mt-6"
}, null, 8, ["device", "pythonMirror", "pypiMirror", "torchMirror"]),
createBaseVNode("div", _hoisted_4, [
createVNode(unref(script$7), {
label: _ctx.$t("g.back"),
severity: "secondary",
icon: "pi pi-arrow-left",
onClick: /* @__PURE__ */ __name(($event) => activateCallback("2"), "onClick")
}, null, 8, ["label", "onClick"]),
createVNode(unref(script$7), {
label: _ctx.$t("g.install"),
icon: "pi pi-check",
iconPos: "right",
disabled: hasError.value,
onClick: _cache[10] || (_cache[10] = ($event) => install())
}, null, 8, ["label", "disabled"])
])
]),
_: 1
})
]),
_: 1
})
]),
_: 1
})
]),
_: 1
});
};
}
});
const InstallView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-cd6731d2"]]);
export {
InstallView as default
};
//# sourceMappingURL=InstallView-CVZcZZXJ.js.map

View File

@ -1,18 +1,20 @@
:root { .p-tag[data-v-79125ff6] {
--p-tag-gap: 0.5rem; --p-tag-gap: 0.5rem;
} }
.hover-brighten { .hover-brighten {
&[data-v-79125ff6] {
transition-property: color, background-color, border-color, text-decoration-color, fill, stroke; transition-property: color, background-color, border-color, text-decoration-color, fill, stroke;
transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
transition-duration: 150ms; transition-duration: 150ms;
transition-property: filter, box-shadow; transition-property: filter, box-shadow;
&:hover { }
&[data-v-79125ff6]:hover {
filter: brightness(107%) contrast(105%); filter: brightness(107%) contrast(105%);
box-shadow: 0 0 0.25rem #ffffff79; box-shadow: 0 0 0.25rem #ffffff79;
} }
} }
.p-accordioncontent-content { .p-accordioncontent-content[data-v-79125ff6] {
border-radius: 0.5rem; border-radius: 0.5rem;
--tw-bg-opacity: 1; --tw-bg-opacity: 1;
background-color: rgb(23 23 23 / var(--tw-bg-opacity)); background-color: rgb(23 23 23 / var(--tw-bg-opacity));
@ -21,14 +23,14 @@
transition-duration: 150ms; transition-duration: 150ms;
} }
div.selected { div.selected {
.gpu-button:not(.selected) { .gpu-button[data-v-79125ff6]:not(.selected) {
opacity: 0.5; opacity: 0.5;
} }
.gpu-button:not(.selected):hover { .gpu-button[data-v-79125ff6]:not(.selected):hover {
opacity: 1; opacity: 1;
} }
} }
.gpu-button { .gpu-button[data-v-79125ff6] {
margin: 0px; margin: 0px;
display: flex; display: flex;
width: 50%; width: 50%;
@ -43,37 +45,37 @@ div.selected {
transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
transition-duration: 150ms; transition-duration: 150ms;
} }
.gpu-button:hover { .gpu-button[data-v-79125ff6]:hover {
--tw-bg-opacity: 0.75; --tw-bg-opacity: 0.75;
} }
.gpu-button { .gpu-button {
&.selected { &.selected[data-v-79125ff6] {
--tw-bg-opacity: 1; --tw-bg-opacity: 1;
background-color: rgb(64 64 64 / var(--tw-bg-opacity)); background-color: rgb(64 64 64 / var(--tw-bg-opacity));
} }
&.selected { &.selected[data-v-79125ff6] {
--tw-bg-opacity: 0.5; --tw-bg-opacity: 0.5;
} }
&.selected { &.selected[data-v-79125ff6] {
opacity: 1; opacity: 1;
} }
&.selected:hover { &.selected[data-v-79125ff6]:hover {
--tw-bg-opacity: 0.6; --tw-bg-opacity: 0.6;
} }
} }
.disabled { .disabled[data-v-79125ff6] {
pointer-events: none; pointer-events: none;
opacity: 0.4; opacity: 0.4;
} }
.p-card-header { .p-card-header[data-v-79125ff6] {
flex-grow: 1; flex-grow: 1;
text-align: center; text-align: center;
} }
.p-card-body { .p-card-body[data-v-79125ff6] {
padding-top: 0px; padding-top: 0px;
text-align: center; text-align: center;
} }
[data-v-de33872d] .p-steppanel { [data-v-cd6731d2] .p-steppanel {
background-color: transparent background-color: transparent
} }

View File

@ -1,10 +1,9 @@
var __defProp = Object.defineProperty; var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true }); var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, c as computed, o as openBlock, f as createElementBlock, F as Fragment, E as renderList, N as createVNode, M as withCtx, aE as createTextVNode, X as toDisplayString, j as unref, aI as script, I as createCommentVNode, ab as ref, cn as FilterMatchMode, a$ as useKeybindingStore, a2 as useCommandStore, a1 as useI18n, af as normalizeI18nKey, w as watchEffect, bs as useToast, r as resolveDirective, k as createBlock, co as SearchBox, H as createBaseVNode, l as script$2, av as script$4, bM as withModifiers, bZ as script$5, aP as script$6, i as withDirectives, cp as _sfc_main$2, aL as pushScopeId, aM as popScopeId, cq as KeyComboImpl, cr as KeybindingImpl, _ as _export_sfc } from "./index-DjNHn37O.js"; import { d as defineComponent, c as computed, o as openBlock, f as createElementBlock, F as Fragment, D as renderList, k as createVNode, z as withCtx, a7 as createTextVNode, E as toDisplayString, j as unref, a4 as script, B as createCommentVNode, U as ref, dl as FilterMatchMode, an as useKeybindingStore, L as useCommandStore, K as useI18n, Y as normalizeI18nKey, w as watchEffect, aR as useToast, r as resolveDirective, y as createBlock, dm as SearchBox, m as createBaseVNode, l as script$2, bg as script$4, ar as withModifiers, bj as script$5, ab as script$6, i as withDirectives, dn as _sfc_main$2, dp as KeyComboImpl, dq as KeybindingImpl, _ as _export_sfc } from "./index-DqqhYDnY.js";
import { s as script$1, a as script$3 } from "./index-B5F0uxTQ.js"; import { g as script$1, h as script$3 } from "./index-BapOFhAR.js";
import { u as useKeybindingService } from "./keybindingService-Bx7YdkXn.js"; import { u as useKeybindingService } from "./keybindingService-DEgCutrm.js";
import "./index-B-aVupP5.js"; import "./index-DXE47DZl.js";
import "./index-5HFeZax4.js";
const _hoisted_1$1 = { const _hoisted_1$1 = {
key: 0, key: 0,
class: "px-2" class: "px-2"
@ -37,7 +36,6 @@ const _sfc_main$1 = /* @__PURE__ */ defineComponent({
}; };
} }
}); });
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-2554ab36"), n = n(), popScopeId(), n), "_withScopeId");
const _hoisted_1 = { class: "actions invisible flex flex-row" }; const _hoisted_1 = { class: "actions invisible flex flex-row" };
const _hoisted_2 = ["title"]; const _hoisted_2 = ["title"];
const _hoisted_3 = { key: 1 }; const _hoisted_3 = { key: 1 };
@ -248,7 +246,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
severity: "error" severity: "error"
}, { }, {
default: withCtx(() => [ default: withCtx(() => [
createTextVNode(" Keybinding already exists on "), _cache[3] || (_cache[3] = createTextVNode(" Keybinding already exists on ")),
createVNode(unref(script), { createVNode(unref(script), {
severity: "secondary", severity: "secondary",
value: existingKeybindingOnCombo.value.commandId value: existingKeybindingOnCombo.value.commandId
@ -281,4 +279,4 @@ const KeybindingPanel = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "d
export { export {
KeybindingPanel as default KeybindingPanel as default
}; };
//# sourceMappingURL=KeybindingPanel-Dc3C4lG1.js.map //# sourceMappingURL=KeybindingPanel-CeHhC2F4.js.map

87
web/assets/MaintenanceView-Bj5_Vr6o.css generated vendored Normal file
View File

@ -0,0 +1,87 @@
.task-card-ok[data-v-c3bd7658] {
position: absolute;
right: -1rem;
bottom: -1rem;
grid-column: 1 / -1;
grid-row: 1 / -1;
--tw-text-opacity: 1;
color: rgb(150 206 76 / var(--tw-text-opacity));
opacity: 1;
transition-property: opacity;
transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
transition-duration: 150ms;
font-size: 4rem;
text-shadow: 0.25rem 0 0.5rem black;
z-index: 10;
}
.p-card {
&[data-v-c3bd7658] {
transition-property: opacity;
transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
transition-duration: 150ms;
--p-card-background: var(--p-button-secondary-background);
opacity: 0.9;
}
&.opacity-65[data-v-c3bd7658] {
opacity: 0.4;
}
&[data-v-c3bd7658]:hover {
opacity: 1;
}
}
[data-v-c3bd7658] .p-card-header {
z-index: 0;
}
[data-v-c3bd7658] .p-card-body {
z-index: 1;
flex-grow: 1;
justify-content: space-between;
}
.task-div {
> i[data-v-c3bd7658] {
pointer-events: none;
}
&:hover > i[data-v-c3bd7658] {
opacity: 0.2;
}
}
[data-v-74b78f7d] .p-tag {
--p-tag-gap: 0.375rem;
}
.backspan[data-v-74b78f7d]::before {
position: absolute;
margin: 0px;
color: var(--p-text-muted-color);
font-family: 'primeicons';
top: -2rem;
right: -2rem;
speak: none;
font-style: normal;
font-weight: normal;
font-variant: normal;
text-transform: none;
line-height: 1;
display: inline-block;
-webkit-font-smoothing: antialiased;
opacity: 0.02;
font-size: min(14rem, 90vw);
z-index: 0;
}

26033
web/assets/MaintenanceView-Df7CHNWW.js generated vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -1,7 +1,7 @@
:root { .p-tag[data-v-dc169863] {
--p-tag-gap: 0.5rem; --p-tag-gap: 0.5rem;
} }
.comfy-installer { .comfy-installer[data-v-dc169863] {
margin-top: max(1rem, max(0px, calc((100vh - 42rem) * 0.5))); margin-top: max(1rem, max(0px, calc((100vh - 42rem) * 0.5)));
} }

View File

@ -1,9 +1,7 @@
var __defProp = Object.defineProperty; var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true }); var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, a1 as useI18n, ab as ref, m as onMounted, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, X as toDisplayString, N as createVNode, j as unref, aI as script, l as script$2, c0 as electronAPI } from "./index-DjNHn37O.js"; import { d as defineComponent, K as useI18n, U as ref, p as onMounted, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, a4 as script, a$ as script$1, l as script$2, b5 as electronAPI, _ as _export_sfc } from "./index-DqqhYDnY.js";
import { s as script$1 } from "./index-jXPKy3pP.js"; import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js";
import "./index-5HFeZax4.js";
const _hoisted_1 = { class: "comfy-installer grow flex flex-col gap-4 text-neutral-300 max-w-110" }; const _hoisted_1 = { class: "comfy-installer grow flex flex-col gap-4 text-neutral-300 max-w-110" };
const _hoisted_2 = { class: "text-2xl font-semibold text-neutral-100" }; const _hoisted_2 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_3 = { class: "m-1 text-neutral-300" }; const _hoisted_3 = { class: "m-1 text-neutral-300" };
@ -69,7 +67,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
}; };
} }
}); });
const ManualConfigurationView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-dc169863"]]);
export { export {
_sfc_main as default ManualConfigurationView as default
}; };
//# sourceMappingURL=ManualConfigurationView-Bi_qHE-n.js.map //# sourceMappingURL=ManualConfigurationView-Cz0_f_T-.js.map

86
web/assets/MetricsConsentView-B5NlgqrS.js generated vendored Normal file
View File

@ -0,0 +1,86 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
import { d as defineComponent, aR as useToast, K as useI18n, U as ref, be as useRouter, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, a7 as createTextVNode, k as createVNode, j as unref, bn as script, l as script$1, b5 as electronAPI } from "./index-DqqhYDnY.js";
const _hoisted_1 = { class: "h-full p-8 2xl:p-16 flex flex-col items-center justify-center" };
const _hoisted_2 = { class: "bg-neutral-800 rounded-lg shadow-lg p-6 w-full max-w-[600px] flex flex-col gap-6" };
const _hoisted_3 = { class: "text-3xl font-semibold text-neutral-100" };
const _hoisted_4 = { class: "text-neutral-400" };
const _hoisted_5 = { class: "text-neutral-400" };
const _hoisted_6 = {
href: "https://comfy.org/privacy",
target: "_blank",
class: "text-blue-400 hover:text-blue-300 underline"
};
const _hoisted_7 = { class: "flex items-center gap-4" };
const _hoisted_8 = {
id: "metricsDescription",
class: "text-neutral-100"
};
const _hoisted_9 = { class: "flex pt-6 justify-end" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "MetricsConsentView",
setup(__props) {
const toast = useToast();
const { t } = useI18n();
const allowMetrics = ref(true);
const router = useRouter();
const isUpdating = ref(false);
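    // Persist the metrics choice through the desktop API, show a toast if it fails, and continue to the main view either way.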
const updateConsent = /* @__PURE__ */ __name(async () => {
isUpdating.value = true;
try {
await electronAPI().setMetricsConsent(allowMetrics.value);
} catch (error) {
toast.add({
severity: "error",
summary: t("install.errorUpdatingConsent"),
detail: t("install.errorUpdatingConsentDetail"),
life: 3e3
});
} finally {
isUpdating.value = false;
}
router.push("/");
}, "updateConsent");
return (_ctx, _cache) => {
const _component_BaseViewTemplate = _sfc_main$1;
return openBlock(), createBlock(_component_BaseViewTemplate, { dark: "" }, {
default: withCtx(() => [
createBaseVNode("div", _hoisted_1, [
createBaseVNode("div", _hoisted_2, [
createBaseVNode("h2", _hoisted_3, toDisplayString(_ctx.$t("install.helpImprove")), 1),
createBaseVNode("p", _hoisted_4, toDisplayString(_ctx.$t("install.updateConsent")), 1),
createBaseVNode("p", _hoisted_5, [
createTextVNode(toDisplayString(_ctx.$t("install.moreInfo")) + " ", 1),
createBaseVNode("a", _hoisted_6, toDisplayString(_ctx.$t("install.privacyPolicy")), 1),
_cache[1] || (_cache[1] = createTextVNode(". "))
]),
createBaseVNode("div", _hoisted_7, [
createVNode(unref(script), {
modelValue: allowMetrics.value,
"onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => allowMetrics.value = $event),
"aria-describedby": "metricsDescription"
}, null, 8, ["modelValue"]),
createBaseVNode("span", _hoisted_8, toDisplayString(allowMetrics.value ? _ctx.$t("install.metricsEnabled") : _ctx.$t("install.metricsDisabled")), 1)
]),
createBaseVNode("div", _hoisted_9, [
createVNode(unref(script$1), {
label: _ctx.$t("g.ok"),
icon: "pi pi-check",
loading: isUpdating.value,
iconPos: "right",
onClick: updateConsent
}, null, 8, ["label", "loading"])
])
])
])
]),
_: 1
});
};
}
});
export {
_sfc_main as default
};
//# sourceMappingURL=MetricsConsentView-B5NlgqrS.js.map

View File

@ -1,21 +1,16 @@
var __defProp = Object.defineProperty; var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true }); var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, bW as useRouter, r as resolveDirective, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, X as toDisplayString, N as createVNode, j as unref, l as script, i as withDirectives } from "./index-DjNHn37O.js"; import { d as defineComponent, be as useRouter, r as resolveDirective, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, l as script, i as withDirectives, _ as _export_sfc } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js"; import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _imports_0 = "" + new URL("images/sad_girl.png", import.meta.url).href; const _imports_0 = "" + new URL("images/sad_girl.png", import.meta.url).href;
const _hoisted_1 = { class: "sad-container" }; const _hoisted_1 = { class: "sad-container" };
const _hoisted_2 = /* @__PURE__ */ createBaseVNode("img", { const _hoisted_2 = { class: "no-drag sad-text flex items-center" };
class: "sad-girl", const _hoisted_3 = { class: "flex flex-col gap-8 p-8 min-w-110" };
src: _imports_0, const _hoisted_4 = { class: "text-4xl font-bold text-red-500" };
alt: "Sad girl illustration" const _hoisted_5 = { class: "space-y-4" };
}, null, -1); const _hoisted_6 = { class: "text-xl" };
const _hoisted_3 = { class: "no-drag sad-text flex items-center" }; const _hoisted_7 = { class: "list-disc list-inside space-y-1 text-neutral-800" };
const _hoisted_4 = { class: "flex flex-col gap-8 p-8 min-w-110" }; const _hoisted_8 = { class: "flex gap-4" };
const _hoisted_5 = { class: "text-4xl font-bold text-red-500" };
const _hoisted_6 = { class: "space-y-4" };
const _hoisted_7 = { class: "text-xl" };
const _hoisted_8 = { class: "list-disc list-inside space-y-1 text-neutral-800" };
const _hoisted_9 = { class: "flex gap-4" };
const _sfc_main = /* @__PURE__ */ defineComponent({ const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "NotSupportedView", __name: "NotSupportedView",
setup(__props) { setup(__props) {
@ -37,18 +32,22 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
return openBlock(), createBlock(_sfc_main$1, null, { return openBlock(), createBlock(_sfc_main$1, null, {
default: withCtx(() => [ default: withCtx(() => [
createBaseVNode("div", _hoisted_1, [ createBaseVNode("div", _hoisted_1, [
_hoisted_2, _cache[0] || (_cache[0] = createBaseVNode("img", {
createBaseVNode("div", _hoisted_3, [ class: "sad-girl",
createBaseVNode("div", _hoisted_4, [ src: _imports_0,
createBaseVNode("h1", _hoisted_5, toDisplayString(_ctx.$t("notSupported.title")), 1), alt: "Sad girl illustration"
createBaseVNode("div", _hoisted_6, [ }, null, -1)),
createBaseVNode("p", _hoisted_7, toDisplayString(_ctx.$t("notSupported.message")), 1), createBaseVNode("div", _hoisted_2, [
createBaseVNode("ul", _hoisted_8, [ createBaseVNode("div", _hoisted_3, [
createBaseVNode("h1", _hoisted_4, toDisplayString(_ctx.$t("notSupported.title")), 1),
createBaseVNode("div", _hoisted_5, [
createBaseVNode("p", _hoisted_6, toDisplayString(_ctx.$t("notSupported.message")), 1),
createBaseVNode("ul", _hoisted_7, [
createBaseVNode("li", null, toDisplayString(_ctx.$t("notSupported.supportedDevices.macos")), 1), createBaseVNode("li", null, toDisplayString(_ctx.$t("notSupported.supportedDevices.macos")), 1),
createBaseVNode("li", null, toDisplayString(_ctx.$t("notSupported.supportedDevices.windows")), 1) createBaseVNode("li", null, toDisplayString(_ctx.$t("notSupported.supportedDevices.windows")), 1)
]) ])
]), ]),
createBaseVNode("div", _hoisted_9, [ createBaseVNode("div", _hoisted_8, [
createVNode(unref(script), { createVNode(unref(script), {
label: _ctx.$t("notSupported.learnMore"), label: _ctx.$t("notSupported.learnMore"),
icon: "pi pi-github", icon: "pi pi-github",
@ -80,7 +79,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
}; };
} }
}); });
const NotSupportedView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-ebb20958"]]);
export { export {
_sfc_main as default NotSupportedView as default
}; };
//# sourceMappingURL=NotSupportedView-Drz3x2d-.js.map //# sourceMappingURL=NotSupportedView-BUpntA4x.js.map

View File

@ -1,17 +1,19 @@
.sad-container { .sad-container {
&[data-v-ebb20958] {
display: grid; display: grid;
align-items: center; align-items: center;
justify-content: space-evenly; justify-content: space-evenly;
grid-template-columns: 25rem 1fr; grid-template-columns: 25rem 1fr;
& > * { }
&[data-v-ebb20958] > * {
grid-row: 1; grid-row: 1;
} }
} }
.sad-text { .sad-text[data-v-ebb20958] {
grid-column: 1/3; grid-column: 1/3;
} }
.sad-girl { .sad-girl[data-v-ebb20958] {
grid-column: 2/3; grid-column: 2/3;
width: min(75vw, 100vh); width: min(75vw, 100vh);
} }

View File

@ -1,25 +1,23 @@
var __defProp = Object.defineProperty; var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true }); var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { H as createBaseVNode, o as openBlock, f as createElementBlock, Z as markRaw, d as defineComponent, a as useSettingStore, aS as storeToRefs, a5 as watch, cO as useCopyToClipboard, a1 as useI18n, k as createBlock, M as withCtx, j as unref, bZ as script, X as toDisplayString, E as renderList, F as Fragment, N as createVNode, l as script$1, I as createCommentVNode, bQ as script$2, cP as FormItem, cp as _sfc_main$1, c0 as electronAPI } from "./index-DjNHn37O.js"; import { o as openBlock, f as createElementBlock, m as createBaseVNode, H as markRaw, d as defineComponent, a as useSettingStore, ae as storeToRefs, O as watch, dy as useCopyToClipboard, K as useI18n, y as createBlock, z as withCtx, j as unref, bj as script, E as toDisplayString, D as renderList, F as Fragment, k as createVNode, l as script$1, B as createCommentVNode, bh as script$2, dz as FormItem, dn as _sfc_main$1, b5 as electronAPI } from "./index-DqqhYDnY.js";
import { u as useServerConfigStore } from "./serverConfigStore-CvyKFVuP.js"; import { u as useServerConfigStore } from "./serverConfigStore-Kb5DJVFt.js";
const _hoisted_1$1 = { const _hoisted_1$1 = {
viewBox: "0 0 24 24", viewBox: "0 0 24 24",
width: "1.2em", width: "1.2em",
height: "1.2em" height: "1.2em"
}; };
const _hoisted_2$1 = /* @__PURE__ */ createBaseVNode("path", {
fill: "none",
stroke: "currentColor",
"stroke-linecap": "round",
"stroke-linejoin": "round",
"stroke-width": "2",
d: "m4 17l6-6l-6-6m8 14h8"
}, null, -1);
const _hoisted_3$1 = [
_hoisted_2$1
];
function render(_ctx, _cache) { function render(_ctx, _cache) {
return openBlock(), createElementBlock("svg", _hoisted_1$1, [..._hoisted_3$1]); return openBlock(), createElementBlock("svg", _hoisted_1$1, _cache[0] || (_cache[0] = [
createBaseVNode("path", {
fill: "none",
stroke: "currentColor",
"stroke-linecap": "round",
"stroke-linejoin": "round",
"stroke-width": "2",
d: "m4 17l6-6l-6-6m8 14h8"
}, null, -1)
]));
} }
__name(render, "render"); __name(render, "render");
const __unplugin_components_0 = markRaw({ name: "lucide-terminal", render }); const __unplugin_components_0 = markRaw({ name: "lucide-terminal", render });
@ -155,4 +153,4 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export { export {
_sfc_main as default _sfc_main as default
}; };
//# sourceMappingURL=ServerConfigPanel-Be4StJmv.js.map //# sourceMappingURL=ServerConfigPanel-B1lI5M9c.js.map

100
web/assets/ServerStartView-BpH4TXPO.js generated vendored Normal file
View File

@ -0,0 +1,100 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, K as useI18n, U as ref, bk as ProgressStatus, p as onMounted, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, a7 as createTextVNode, E as toDisplayString, j as unref, f as createElementBlock, B as createCommentVNode, k as createVNode, l as script, i as withDirectives, v as vShow, bl as BaseTerminal, b5 as electronAPI, _ as _export_sfc } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1 = { class: "flex flex-col w-full h-full items-center" };
const _hoisted_2 = { class: "text-2xl font-bold" };
const _hoisted_3 = { key: 0 };
const _hoisted_4 = {
key: 0,
class: "flex flex-col items-center gap-4"
};
const _hoisted_5 = { class: "flex items-center my-4 gap-2" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "ServerStartView",
setup(__props) {
const electron = electronAPI();
const { t } = useI18n();
const status = ref(ProgressStatus.INITIAL_STATE);
const electronVersion = ref("");
let xterm;
const terminalVisible = ref(true);
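    // Hide the terminal when server start fails (a "show terminal" button restores it); on other status changes clear the previous output.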
const updateProgress = /* @__PURE__ */ __name(({ status: newStatus }) => {
status.value = newStatus;
if (newStatus === ProgressStatus.ERROR) terminalVisible.value = false;
else xterm?.clear();
}, "updateProgress");
const terminalCreated = /* @__PURE__ */ __name(({ terminal, useAutoSize }, root) => {
xterm = terminal;
useAutoSize({ root, autoRows: true, autoCols: true });
electron.onLogMessage((message) => {
terminal.write(message);
});
terminal.options.cursorBlink = false;
terminal.options.disableStdin = true;
terminal.options.cursorInactiveStyle = "block";
}, "terminalCreated");
const reinstall = /* @__PURE__ */ __name(() => electron.reinstall(), "reinstall");
const reportIssue = /* @__PURE__ */ __name(() => {
window.open("https://forum.comfy.org/c/v1-feedback/", "_blank");
}, "reportIssue");
const openLogs = /* @__PURE__ */ __name(() => electron.openLogsFolder(), "openLogs");
onMounted(async () => {
electron.sendReady();
electron.onProgressUpdate(updateProgress);
electronVersion.value = await electron.getElectronVersion();
});
return (_ctx, _cache) => {
return openBlock(), createBlock(_sfc_main$1, {
dark: "",
class: "flex-col"
}, {
default: withCtx(() => [
createBaseVNode("div", _hoisted_1, [
createBaseVNode("h2", _hoisted_2, [
createTextVNode(toDisplayString(unref(t)(`serverStart.process.${status.value}`)) + " ", 1),
status.value === unref(ProgressStatus).ERROR ? (openBlock(), createElementBlock("span", _hoisted_3, " v" + toDisplayString(electronVersion.value), 1)) : createCommentVNode("", true)
]),
status.value === unref(ProgressStatus).ERROR ? (openBlock(), createElementBlock("div", _hoisted_4, [
createBaseVNode("div", _hoisted_5, [
createVNode(unref(script), {
icon: "pi pi-flag",
severity: "secondary",
label: unref(t)("serverStart.reportIssue"),
onClick: reportIssue
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-file",
severity: "secondary",
label: unref(t)("serverStart.openLogs"),
onClick: openLogs
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-refresh",
label: unref(t)("serverStart.reinstall"),
onClick: reinstall
}, null, 8, ["label"])
]),
!terminalVisible.value ? (openBlock(), createBlock(unref(script), {
key: 0,
icon: "pi pi-search",
severity: "secondary",
label: unref(t)("serverStart.showTerminal"),
onClick: _cache[0] || (_cache[0] = ($event) => terminalVisible.value = true)
}, null, 8, ["label"])) : createCommentVNode("", true)
])) : createCommentVNode("", true),
withDirectives(createVNode(BaseTerminal, { onCreated: terminalCreated }, null, 512), [
[vShow, terminalVisible.value]
])
])
]),
_: 1
});
};
}
});
const ServerStartView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-4140d62b"]]);
export {
ServerStartView as default
};
//# sourceMappingURL=ServerStartView-BpH4TXPO.js.map

View File

@ -1,98 +0,0 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, a1 as useI18n, ab as ref, b_ as ProgressStatus, m as onMounted, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, aE as createTextVNode, X as toDisplayString, j as unref, f as createElementBlock, I as createCommentVNode, N as createVNode, l as script, i as withDirectives, v as vShow, b$ as BaseTerminal, aL as pushScopeId, aM as popScopeId, c0 as electronAPI, _ as _export_sfc } from "./index-DjNHn37O.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js";
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-42c1131d"), n = n(), popScopeId(), n), "_withScopeId");
const _hoisted_1 = { class: "text-2xl font-bold" };
const _hoisted_2 = { key: 0 };
const _hoisted_3 = {
key: 0,
class: "flex flex-col items-center gap-4"
};
const _hoisted_4 = { class: "flex items-center my-4 gap-2" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "ServerStartView",
setup(__props) {
const electron = electronAPI();
const { t } = useI18n();
const status = ref(ProgressStatus.INITIAL_STATE);
const electronVersion = ref("");
let xterm;
const terminalVisible = ref(true);
const updateProgress = /* @__PURE__ */ __name(({ status: newStatus }) => {
status.value = newStatus;
if (newStatus === ProgressStatus.ERROR) terminalVisible.value = false;
else xterm?.clear();
}, "updateProgress");
const terminalCreated = /* @__PURE__ */ __name(({ terminal, useAutoSize }, root) => {
xterm = terminal;
useAutoSize(root, true, true);
electron.onLogMessage((message) => {
terminal.write(message);
});
terminal.options.cursorBlink = false;
terminal.options.disableStdin = true;
terminal.options.cursorInactiveStyle = "block";
}, "terminalCreated");
const reinstall = /* @__PURE__ */ __name(() => electron.reinstall(), "reinstall");
const reportIssue = /* @__PURE__ */ __name(() => {
window.open("https://forum.comfy.org/c/v1-feedback/", "_blank");
}, "reportIssue");
const openLogs = /* @__PURE__ */ __name(() => electron.openLogsFolder(), "openLogs");
onMounted(async () => {
electron.sendReady();
electron.onProgressUpdate(updateProgress);
electronVersion.value = await electron.getElectronVersion();
});
return (_ctx, _cache) => {
return openBlock(), createBlock(_sfc_main$1, {
dark: "",
class: "flex-col"
}, {
default: withCtx(() => [
createBaseVNode("h2", _hoisted_1, [
createTextVNode(toDisplayString(unref(t)(`serverStart.process.${status.value}`)) + " ", 1),
status.value === unref(ProgressStatus).ERROR ? (openBlock(), createElementBlock("span", _hoisted_2, " v" + toDisplayString(electronVersion.value), 1)) : createCommentVNode("", true)
]),
status.value === unref(ProgressStatus).ERROR ? (openBlock(), createElementBlock("div", _hoisted_3, [
createBaseVNode("div", _hoisted_4, [
createVNode(unref(script), {
icon: "pi pi-flag",
severity: "secondary",
label: unref(t)("serverStart.reportIssue"),
onClick: reportIssue
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-file",
severity: "secondary",
label: unref(t)("serverStart.openLogs"),
onClick: openLogs
}, null, 8, ["label"]),
createVNode(unref(script), {
icon: "pi pi-refresh",
label: unref(t)("serverStart.reinstall"),
onClick: reinstall
}, null, 8, ["label"])
]),
!terminalVisible.value ? (openBlock(), createBlock(unref(script), {
key: 0,
icon: "pi pi-search",
severity: "secondary",
label: unref(t)("serverStart.showTerminal"),
onClick: _cache[0] || (_cache[0] = ($event) => terminalVisible.value = true)
}, null, 8, ["label"])) : createCommentVNode("", true)
])) : createCommentVNode("", true),
withDirectives(createVNode(BaseTerminal, { onCreated: terminalCreated }, null, 512), [
[vShow, terminalVisible.value]
])
]),
_: 1
});
};
}
});
const ServerStartView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-42c1131d"]]);
export {
ServerStartView as default
};
//# sourceMappingURL=ServerStartView-CIDTUh4x.js.map

View File

@ -1,5 +1,5 @@
[data-v-42c1131d] .xterm-helper-textarea { [data-v-4140d62b] .xterm-helper-textarea {
/* Hide this as it moves all over when uv is running */ /* Hide this as it moves all over when uv is running */
display: none; display: none;
} }

View File

@ -1,18 +1,17 @@
var __defProp = Object.defineProperty; var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true }); var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, aX as useUserStore, bW as useRouter, ab as ref, c as computed, m as onMounted, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, X as toDisplayString, N as createVNode, bX as withKeys, j as unref, av as script, bQ as script$1, bY as script$2, bZ as script$3, aE as createTextVNode, I as createCommentVNode, l as script$4 } from "./index-DjNHn37O.js"; import { d as defineComponent, aj as useUserStore, be as useRouter, U as ref, c as computed, p as onMounted, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, bf as withKeys, j as unref, bg as script, bh as script$1, bi as script$2, bj as script$3, a7 as createTextVNode, B as createCommentVNode, l as script$4 } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js"; import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _hoisted_1 = { const _hoisted_1 = {
id: "comfy-user-selection", id: "comfy-user-selection",
class: "min-w-84 relative rounded-lg bg-[var(--comfy-menu-bg)] p-5 px-10 shadow-lg" class: "min-w-84 relative rounded-lg bg-[var(--comfy-menu-bg)] p-5 px-10 shadow-lg"
}; };
const _hoisted_2 = /* @__PURE__ */ createBaseVNode("h1", { class: "my-2.5 mb-7 font-normal" }, "ComfyUI", -1); const _hoisted_2 = { class: "flex w-full flex-col items-center" };
const _hoisted_3 = { class: "flex w-full flex-col items-center" }; const _hoisted_3 = { class: "flex w-full flex-col gap-2" };
const _hoisted_4 = { class: "flex w-full flex-col gap-2" }; const _hoisted_4 = { for: "new-user-input" };
const _hoisted_5 = { for: "new-user-input" }; const _hoisted_5 = { class: "flex w-full flex-col gap-2" };
const _hoisted_6 = { class: "flex w-full flex-col gap-2" }; const _hoisted_6 = { for: "existing-user-select" };
const _hoisted_7 = { for: "existing-user-select" }; const _hoisted_7 = { class: "mt-5" };
const _hoisted_8 = { class: "mt-5" };
const _sfc_main = /* @__PURE__ */ defineComponent({ const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "UserSelectView", __name: "UserSelectView",
setup(__props) { setup(__props) {
@ -47,10 +46,10 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
return openBlock(), createBlock(_sfc_main$1, { dark: "" }, { return openBlock(), createBlock(_sfc_main$1, { dark: "" }, {
default: withCtx(() => [ default: withCtx(() => [
createBaseVNode("main", _hoisted_1, [ createBaseVNode("main", _hoisted_1, [
_hoisted_2, _cache[2] || (_cache[2] = createBaseVNode("h1", { class: "my-2.5 mb-7 font-normal" }, "ComfyUI", -1)),
createBaseVNode("div", _hoisted_3, [ createBaseVNode("div", _hoisted_2, [
createBaseVNode("div", _hoisted_4, [ createBaseVNode("div", _hoisted_3, [
createBaseVNode("label", _hoisted_5, toDisplayString(_ctx.$t("userSelect.newUser")) + ":", 1), createBaseVNode("label", _hoisted_4, toDisplayString(_ctx.$t("userSelect.newUser")) + ":", 1),
createVNode(unref(script), { createVNode(unref(script), {
id: "new-user-input", id: "new-user-input",
modelValue: newUsername.value, modelValue: newUsername.value,
@ -60,8 +59,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
}, null, 8, ["modelValue", "placeholder"]) }, null, 8, ["modelValue", "placeholder"])
]), ]),
createVNode(unref(script$1)), createVNode(unref(script$1)),
createBaseVNode("div", _hoisted_6, [ createBaseVNode("div", _hoisted_5, [
createBaseVNode("label", _hoisted_7, toDisplayString(_ctx.$t("userSelect.existingUser")) + ":", 1), createBaseVNode("label", _hoisted_6, toDisplayString(_ctx.$t("userSelect.existingUser")) + ":", 1),
createVNode(unref(script$2), { createVNode(unref(script$2), {
modelValue: selectedUser.value, modelValue: selectedUser.value,
"onUpdate:modelValue": _cache[1] || (_cache[1] = ($event) => selectedUser.value = $event), "onUpdate:modelValue": _cache[1] || (_cache[1] = ($event) => selectedUser.value = $event),
@ -82,7 +81,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
_: 1 _: 1
})) : createCommentVNode("", true) })) : createCommentVNode("", true)
]), ]),
createBaseVNode("footer", _hoisted_8, [ createBaseVNode("footer", _hoisted_7, [
createVNode(unref(script$4), { createVNode(unref(script$4), {
label: _ctx.$t("userSelect.next"), label: _ctx.$t("userSelect.next"),
onClick: login onClick: login
@ -99,4 +98,4 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export { export {
_sfc_main as default _sfc_main as default
}; };
//# sourceMappingURL=UserSelectView-B3jYchWu.js.map //# sourceMappingURL=UserSelectView-wxa07xPk.js.map


@ -1,8 +1,7 @@
var __defProp = Object.defineProperty; var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true }); var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, bW as useRouter, o as openBlock, k as createBlock, M as withCtx, H as createBaseVNode, X as toDisplayString, N as createVNode, j as unref, l as script, aL as pushScopeId, aM as popScopeId, _ as _export_sfc } from "./index-DjNHn37O.js"; import { d as defineComponent, be as useRouter, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, l as script, _ as _export_sfc } from "./index-DqqhYDnY.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BNGF4K22.js"; import { _ as _sfc_main$1 } from "./BaseViewTemplate-Cz111_1A.js";
const _withScopeId = /* @__PURE__ */ __name((n) => (pushScopeId("data-v-7dfaf74c"), n = n(), popScopeId(), n), "_withScopeId");
const _hoisted_1 = { class: "flex flex-col items-center justify-center gap-8 p-8" }; const _hoisted_1 = { class: "flex flex-col items-center justify-center gap-8 p-8" };
const _hoisted_2 = { class: "animated-gradient-text text-glow select-none" }; const _hoisted_2 = { class: "animated-gradient-text text-glow select-none" };
const _sfc_main = /* @__PURE__ */ defineComponent({ const _sfc_main = /* @__PURE__ */ defineComponent({
@ -37,4 +36,4 @@ const WelcomeView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-
export { export {
WelcomeView as default WelcomeView as default
}; };
//# sourceMappingURL=WelcomeView-N0ZXLjdi.js.map //# sourceMappingURL=WelcomeView-BrXELNIm.js.map

27
web/assets/index-5HFeZax4.js generated vendored

@ -1,27 +0,0 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { ct as script$1, H as createBaseVNode, o as openBlock, f as createElementBlock, D as mergeProps } from "./index-DjNHn37O.js";
var script = {
name: "PlusIcon",
"extends": script$1
};
var _hoisted_1 = /* @__PURE__ */ createBaseVNode("path", {
d: "M7.67742 6.32258V0.677419C7.67742 0.497757 7.60605 0.325452 7.47901 0.198411C7.35197 0.0713707 7.17966 0 7 0C6.82034 0 6.64803 0.0713707 6.52099 0.198411C6.39395 0.325452 6.32258 0.497757 6.32258 0.677419V6.32258H0.677419C0.497757 6.32258 0.325452 6.39395 0.198411 6.52099C0.0713707 6.64803 0 6.82034 0 7C0 7.17966 0.0713707 7.35197 0.198411 7.47901C0.325452 7.60605 0.497757 7.67742 0.677419 7.67742H6.32258V13.3226C6.32492 13.5015 6.39704 13.6725 6.52358 13.799C6.65012 13.9255 6.82106 13.9977 7 14C7.17966 14 7.35197 13.9286 7.47901 13.8016C7.60605 13.6745 7.67742 13.5022 7.67742 13.3226V7.67742H13.3226C13.5022 7.67742 13.6745 7.60605 13.8016 7.47901C13.9286 7.35197 14 7.17966 14 7C13.9977 6.82106 13.9255 6.65012 13.799 6.52358C13.6725 6.39704 13.5015 6.32492 13.3226 6.32258H7.67742Z",
fill: "currentColor"
}, null, -1);
var _hoisted_2 = [_hoisted_1];
function render(_ctx, _cache, $props, $setup, $data, $options) {
return openBlock(), createElementBlock("svg", mergeProps({
width: "14",
height: "14",
viewBox: "0 0 14 14",
fill: "none",
xmlns: "http://www.w3.org/2000/svg"
}, _ctx.pti()), _hoisted_2, 16);
}
__name(render, "render");
script.render = render;
export {
script as s
};
//# sourceMappingURL=index-5HFeZax4.js.map

29
web/assets/index-B-aVupP5.js generated vendored

@ -1,29 +0,0 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { ct as script$1, H as createBaseVNode, o as openBlock, f as createElementBlock, D as mergeProps } from "./index-DjNHn37O.js";
var script = {
name: "BarsIcon",
"extends": script$1
};
var _hoisted_1 = /* @__PURE__ */ createBaseVNode("path", {
"fill-rule": "evenodd",
"clip-rule": "evenodd",
d: "M13.3226 3.6129H0.677419C0.497757 3.6129 0.325452 3.54152 0.198411 3.41448C0.0713707 3.28744 0 3.11514 0 2.93548C0 2.75581 0.0713707 2.58351 0.198411 2.45647C0.325452 2.32943 0.497757 2.25806 0.677419 2.25806H13.3226C13.5022 2.25806 13.6745 2.32943 13.8016 2.45647C13.9286 2.58351 14 2.75581 14 2.93548C14 3.11514 13.9286 3.28744 13.8016 3.41448C13.6745 3.54152 13.5022 3.6129 13.3226 3.6129ZM13.3226 7.67741H0.677419C0.497757 7.67741 0.325452 7.60604 0.198411 7.479C0.0713707 7.35196 0 7.17965 0 6.99999C0 6.82033 0.0713707 6.64802 0.198411 6.52098C0.325452 6.39394 0.497757 6.32257 0.677419 6.32257H13.3226C13.5022 6.32257 13.6745 6.39394 13.8016 6.52098C13.9286 6.64802 14 6.82033 14 6.99999C14 7.17965 13.9286 7.35196 13.8016 7.479C13.6745 7.60604 13.5022 7.67741 13.3226 7.67741ZM0.677419 11.7419H13.3226C13.5022 11.7419 13.6745 11.6706 13.8016 11.5435C13.9286 11.4165 14 11.2442 14 11.0645C14 10.8848 13.9286 10.7125 13.8016 10.5855C13.6745 10.4585 13.5022 10.3871 13.3226 10.3871H0.677419C0.497757 10.3871 0.325452 10.4585 0.198411 10.5855C0.0713707 10.7125 0 10.8848 0 11.0645C0 11.2442 0.0713707 11.4165 0.198411 11.5435C0.325452 11.6706 0.497757 11.7419 0.677419 11.7419Z",
fill: "currentColor"
}, null, -1);
var _hoisted_2 = [_hoisted_1];
function render(_ctx, _cache, $props, $setup, $data, $options) {
return openBlock(), createElementBlock("svg", mergeProps({
width: "14",
height: "14",
viewBox: "0 0 14 14",
fill: "none",
xmlns: "http://www.w3.org/2000/svg"
}, _ctx.pti()), _hoisted_2, 16);
}
__name(render, "render");
script.render = render;
export {
script as s
};
//# sourceMappingURL=index-B-aVupP5.js.map

539
web/assets/index-BNlqgrYT.js generated vendored Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long


@ -2101,6 +2101,15 @@
.inset-0{ .inset-0{
inset: 0px; inset: 0px;
} }
.-bottom-4{
bottom: -1rem;
}
.-right-14{
right: -3.5rem;
}
.-right-4{
right: -1rem;
}
.bottom-\[10px\]{ .bottom-\[10px\]{
bottom: 10px; bottom: 10px;
} }
@ -2131,6 +2140,15 @@
.z-\[1000\]{ .z-\[1000\]{
z-index: 1000; z-index: 1000;
} }
.z-\[9999\]{
z-index: 9999;
}
.col-span-full{
grid-column: 1 / -1;
}
.row-span-full{
grid-row: 1 / -1;
}
.m-0{ .m-0{
margin: 0px; margin: 0px;
} }
@ -2143,6 +2161,9 @@
.m-2{ .m-2{
margin: 0.5rem; margin: 0.5rem;
} }
.m-8{
margin: 2rem;
}
.mx-1{ .mx-1{
margin-left: 0.25rem; margin-left: 0.25rem;
margin-right: 0.25rem; margin-right: 0.25rem;
@ -2223,6 +2244,9 @@
.mt-5{ .mt-5{
margin-top: 1.25rem; margin-top: 1.25rem;
} }
.mt-6{
margin-top: 1.5rem;
}
.block{ .block{
display: block; display: block;
} }
@ -2253,6 +2277,12 @@
.h-0{ .h-0{
height: 0px; height: 0px;
} }
.h-1{
height: 0.25rem;
}
.h-1\/2{
height: 50%;
}
.h-16{ .h-16{
height: 4rem; height: 4rem;
} }
@ -2262,6 +2292,9 @@
.h-64{ .h-64{
height: 16rem; height: 16rem;
} }
.h-8{
height: 2rem;
}
.h-96{ .h-96{
height: 26rem; height: 26rem;
} }
@ -2271,6 +2304,9 @@
.h-\[30rem\]{ .h-\[30rem\]{
height: 30rem; height: 30rem;
} }
.h-\[var\(--comfy-topbar-height\)\]{
height: var(--comfy-topbar-height);
}
.h-full{ .h-full{
height: 100%; height: 100%;
} }
@ -2283,9 +2319,15 @@
.max-h-full{ .max-h-full{
max-height: 100%; max-height: 100%;
} }
.min-h-52{
min-height: 13rem;
}
.min-h-8{ .min-h-8{
min-height: 2rem; min-height: 2rem;
} }
.min-h-full{
min-height: 100%;
}
.min-h-screen{ .min-h-screen{
min-height: 100vh; min-height: 100vh;
} }
@ -2341,24 +2383,39 @@
.w-screen{ .w-screen{
width: 100vw; width: 100vw;
} }
.min-w-0{
min-width: 0px;
}
.min-w-110{ .min-w-110{
min-width: 32rem; min-width: 32rem;
} }
.min-w-32{
min-width: 8rem;
}
.min-w-84{ .min-w-84{
min-width: 22rem; min-width: 22rem;
} }
.min-w-96{ .min-w-96{
min-width: 26rem; min-width: 26rem;
} }
.min-w-full{
min-width: 100%;
}
.max-w-110{ .max-w-110{
max-width: 32rem; max-width: 32rem;
} }
.max-w-48{
max-width: 12rem;
}
.max-w-64{ .max-w-64{
max-width: 16rem; max-width: 16rem;
} }
.max-w-\[150px\]{ .max-w-\[150px\]{
max-width: 150px; max-width: 150px;
} }
.max-w-\[600px\]{
max-width: 600px;
}
.max-w-full{ .max-w-full{
max-width: 100%; max-width: 100%;
} }
@ -2380,6 +2437,9 @@
.grow{ .grow{
flex-grow: 1; flex-grow: 1;
} }
.border-collapse{
border-collapse: collapse;
}
.-translate-y-40{ .-translate-y-40{
--tw-translate-y: -10rem; --tw-translate-y: -10rem;
transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));
@ -2448,9 +2508,15 @@
.justify-around{ .justify-around{
justify-content: space-around; justify-content: space-around;
} }
.justify-evenly{
justify-content: space-evenly;
}
.gap-0{ .gap-0{
gap: 0px; gap: 0px;
} }
.gap-1{
gap: 0.25rem;
}
.gap-2{ .gap-2{
gap: 0.5rem; gap: 0.5rem;
} }
@ -2466,6 +2532,11 @@
.gap-8{ .gap-8{
gap: 2rem; gap: 2rem;
} }
.space-x-1 > :not([hidden]) ~ :not([hidden]){
--tw-space-x-reverse: 0;
margin-right: calc(0.25rem * var(--tw-space-x-reverse));
margin-left: calc(0.25rem * calc(1 - var(--tw-space-x-reverse)));
}
.space-y-1 > :not([hidden]) ~ :not([hidden]){ .space-y-1 > :not([hidden]) ~ :not([hidden]){
--tw-space-y-reverse: 0; --tw-space-y-reverse: 0;
margin-top: calc(0.25rem * calc(1 - var(--tw-space-y-reverse))); margin-top: calc(0.25rem * calc(1 - var(--tw-space-y-reverse)));
@ -2513,12 +2584,12 @@
.whitespace-pre-line{ .whitespace-pre-line{
white-space: pre-line; white-space: pre-line;
} }
.whitespace-pre-wrap{
white-space: pre-wrap;
}
.text-wrap{ .text-wrap{
text-wrap: wrap; text-wrap: wrap;
} }
.text-nowrap{
text-wrap: nowrap;
}
.rounded{ .rounded{
border-radius: 0.25rem; border-radius: 0.25rem;
} }
@ -2528,19 +2599,49 @@
.rounded-none{ .rounded-none{
border-radius: 0px; border-radius: 0px;
} }
.rounded-t-lg{
border-top-left-radius: 0.5rem;
border-top-right-radius: 0.5rem;
}
.border{ .border{
border-width: 1px; border-width: 1px;
} }
.border-0{
border-width: 0px;
}
.border-x-0{ .border-x-0{
border-left-width: 0px; border-left-width: 0px;
border-right-width: 0px; border-right-width: 0px;
} }
.border-y{
border-top-width: 1px;
border-bottom-width: 1px;
}
.border-b{
border-bottom-width: 1px;
}
.border-l{
border-left-width: 1px;
}
.border-r{
border-right-width: 1px;
}
.border-t-0{ .border-t-0{
border-top-width: 0px; border-top-width: 0px;
} }
.border-solid{
border-style: solid;
}
.border-hidden{
border-style: hidden;
}
.border-none{ .border-none{
border-style: none; border-style: none;
} }
.border-neutral-700{
--tw-border-opacity: 1;
border-color: rgb(64 64 64 / var(--tw-border-opacity));
}
.bg-\[var\(--comfy-menu-bg\)\]{ .bg-\[var\(--comfy-menu-bg\)\]{
background-color: var(--comfy-menu-bg); background-color: var(--comfy-menu-bg);
} }
@ -2635,6 +2736,9 @@
.p-5{ .p-5{
padding: 1.25rem; padding: 1.25rem;
} }
.p-6{
padding: 1.5rem;
}
.p-8{ .p-8{
padding: 2rem; padding: 2rem;
} }
@ -2692,6 +2796,9 @@
.text-center{ .text-center{
text-align: center; text-align: center;
} }
.text-right{
text-align: right;
}
.font-mono{ .font-mono{
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
} }
@ -2701,6 +2808,9 @@
.text-2xl{ .text-2xl{
font-size: 1.5rem; font-size: 1.5rem;
} }
.text-3xl{
font-size: 1.875rem;
}
.text-4xl{ .text-4xl{
font-size: 2.25rem; font-size: 2.25rem;
} }
@ -2783,21 +2893,40 @@
--tw-text-opacity: 1; --tw-text-opacity: 1;
color: rgb(239 68 68 / var(--tw-text-opacity)); color: rgb(239 68 68 / var(--tw-text-opacity));
} }
.underline{
text-decoration-line: underline;
}
.no-underline{ .no-underline{
text-decoration-line: none; text-decoration-line: none;
} }
.antialiased{
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}
.opacity-0{ .opacity-0{
opacity: 0; opacity: 0;
} }
.opacity-100{ .opacity-100{
opacity: 1; opacity: 1;
} }
.opacity-15{
opacity: 0.15;
}
.opacity-25{
opacity: 0.25;
}
.opacity-40{ .opacity-40{
opacity: 0.4; opacity: 0.4;
} }
.opacity-50{ .opacity-50{
opacity: 0.5; opacity: 0.5;
} }
.opacity-65{
opacity: 0.65;
}
.opacity-75{
opacity: 0.75;
}
.shadow-lg{ .shadow-lg{
--tw-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1); --tw-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1);
--tw-shadow-colored: 0 10px 15px -3px var(--tw-shadow-color), 0 4px 6px -4px var(--tw-shadow-color); --tw-shadow-colored: 0 10px 15px -3px var(--tw-shadow-color), 0 4px 6px -4px var(--tw-shadow-color);
@ -2845,6 +2974,9 @@
.duration-100{ .duration-100{
transition-duration: 100ms; transition-duration: 100ms;
} }
.duration-200{
transition-duration: 200ms;
}
.duration-300{ .duration-300{
transition-duration: 300ms; transition-duration: 300ms;
} }
@ -2868,6 +3000,7 @@
--bg-color: #fff; --bg-color: #fff;
--comfy-menu-bg: #353535; --comfy-menu-bg: #353535;
--comfy-menu-secondary-bg: #292929; --comfy-menu-secondary-bg: #292929;
--comfy-topbar-height: 2.5rem;
--comfy-input-bg: #222; --comfy-input-bg: #222;
--input-text: #ddd; --input-text: #ddd;
--descrip-text: #999; --descrip-text: #999;
@ -3625,24 +3758,93 @@ audio.comfy-audio.empty-audio-widget {
padding: var(--comfy-tree-explorer-item-padding) !important; padding: var(--comfy-tree-explorer-item-padding) !important;
} }
/* Load3d styles */
.comfy-load-3d,
.comfy-load-3d-animation,
.comfy-preview-3d,
.comfy-preview-3d-animation{
display: flex;
flex-direction: column;
background: transparent;
flex: 1;
position: relative;
overflow: hidden;
}
.comfy-load-3d canvas,
.comfy-load-3d-animation canvas,
.comfy-preview-3d canvas,
.comfy-preview-3d-animation canvas{
display: flex;
width: 100% !important;
height: 100% !important;
}
/* End of Load3d styles */
/* [Desktop] Electron window specific styles */
.app-drag {
app-region: drag;
}
.no-drag {
app-region: no-drag;
}
.window-actions-spacer {
width: calc(100vw - env(titlebar-area-width, 100vw));
}
/* End of [Desktop] Electron window specific styles */
.hover\:bg-neutral-700:hover{ .hover\:bg-neutral-700:hover{
--tw-bg-opacity: 1; --tw-bg-opacity: 1;
background-color: rgb(64 64 64 / var(--tw-bg-opacity)); background-color: rgb(64 64 64 / var(--tw-bg-opacity));
} }
.hover\:bg-opacity-75:hover{ .hover\:bg-opacity-75:hover{
--tw-bg-opacity: 0.75; --tw-bg-opacity: 0.75;
} }
.hover\:text-blue-300:hover{ .hover\:text-blue-300:hover{
--tw-text-opacity: 1; --tw-text-opacity: 1;
color: rgb(144 205 244 / var(--tw-text-opacity)); color: rgb(144 205 244 / var(--tw-text-opacity));
} }
.hover\:opacity-100:hover{ .hover\:opacity-100:hover{
opacity: 1; opacity: 1;
} }
@media (prefers-reduced-motion: no-preference){
.motion-safe\:w-0{
width: 0px;
}
.motion-safe\:opacity-0{
opacity: 0;
}
.group\/sidebar-tab:focus-within .motion-safe\:group-focus-within\/sidebar-tab\:w-auto{
width: auto;
}
.group\/sidebar-tab:focus-within .motion-safe\:group-focus-within\/sidebar-tab\:opacity-100{
opacity: 1;
}
.group\/sidebar-tab:hover .motion-safe\:group-hover\/sidebar-tab\:w-auto{
width: auto;
}
.group\/sidebar-tab:hover .motion-safe\:group-hover\/sidebar-tab\:opacity-100{
opacity: 1;
}
.group\/tree-node:hover .motion-safe\:group-hover\/tree-node\:opacity-100{
opacity: 1;
}
}
@media not all and (min-width: 640px){
.max-sm\:hidden{
display: none;
}
}
@media (min-width: 768px){ @media (min-width: 768px){
.md\:flex{ .md\:flex{
@ -3653,7 +3855,6 @@ audio.comfy-audio.empty-audio-widget {
display: none; display: none;
} }
} }
@media (min-width: 1536px){ @media (min-width: 1536px){
.\32xl\:mx-4{ .\32xl\:mx-4{
@ -3689,8 +3890,11 @@ audio.comfy-audio.empty-audio-widget {
padding-left: 1rem; padding-left: 1rem;
padding-right: 1rem; padding-right: 1rem;
} }
}
.\32xl\:text-sm{
font-size: 0.875rem;
}
}
@media (prefers-color-scheme: dark){ @media (prefers-color-scheme: dark){
.dark\:bg-gray-800{ .dark\:bg-gray-800{
@ -3740,17 +3944,17 @@ audio.comfy-audio.empty-audio-widget {
margin-bottom: 1rem; margin-bottom: 1rem;
} }
.comfy-error-report[data-v-ddf3e2da] { .comfy-error-report[data-v-3faf7785] {
display: flex; display: flex;
flex-direction: column; flex-direction: column;
gap: 1rem; gap: 1rem;
} }
.action-container[data-v-ddf3e2da] { .action-container[data-v-3faf7785] {
display: flex; display: flex;
gap: 1rem; gap: 1rem;
justify-content: flex-end; justify-content: flex-end;
} }
.wrapper-pre[data-v-ddf3e2da] { .wrapper-pre[data-v-3faf7785] {
white-space: pre-wrap; white-space: pre-wrap;
word-wrap: break-word; word-wrap: break-word;
} }
@ -3768,7 +3972,7 @@ audio.comfy-audio.empty-audio-widget {
margin-left: auto; margin-left: auto;
} }
.comfy-missing-models[data-v-ebf9fccc] { .comfy-missing-models[data-v-f8d63775] {
max-height: 300px; max-height: 300px;
overflow-y: auto; overflow-y: auto;
} }
@ -3810,22 +4014,22 @@ audio.comfy-audio.empty-audio-widget {
background-color: rgb(234 179 8 / var(--tw-bg-opacity)) background-color: rgb(234 179 8 / var(--tw-bg-opacity))
} }
[data-v-ba13476b] .p-inputtext { [data-v-b3ab067d] .p-inputtext {
--p-form-field-padding-x: 0.625rem; --p-form-field-padding-x: 0.625rem;
} }
.p-button.p-inputicon[data-v-ba13476b] { .p-button.p-inputicon[data-v-b3ab067d] {
width: auto; width: auto;
border-style: none; border-style: none;
padding: 0px; padding: 0px;
} }
.form-input[data-v-e4e3022d] .input-slider .p-inputnumber input, .form-input[data-v-1451da7b] .input-slider .p-inputnumber input,
.form-input[data-v-e4e3022d] .input-slider .slider-part { .form-input[data-v-1451da7b] .input-slider .slider-part {
width: 5rem width: 5rem
} }
.form-input[data-v-e4e3022d] .p-inputtext, .form-input[data-v-1451da7b] .p-inputtext,
.form-input[data-v-e4e3022d] .p-select { .form-input[data-v-1451da7b] .p-select {
width: 11rem width: 11rem
} }
@ -3834,7 +4038,7 @@ audio.comfy-audio.empty-audio-widget {
padding-top: 0px !important; padding-top: 0px !important;
} }
.settings-container[data-v-67f71ae9] { .settings-container[data-v-2e21278f] {
display: flex; display: flex;
height: 70vh; height: 70vh;
width: 60vw; width: 60vw;
@ -3842,25 +4046,25 @@ audio.comfy-audio.empty-audio-widget {
overflow: hidden; overflow: hidden;
} }
@media (max-width: 768px) { @media (max-width: 768px) {
.settings-container[data-v-67f71ae9] { .settings-container[data-v-2e21278f] {
flex-direction: column; flex-direction: column;
height: auto; height: auto;
width: 80vw; width: 80vw;
} }
.settings-sidebar[data-v-67f71ae9] { .settings-sidebar[data-v-2e21278f] {
width: 100%; width: 100%;
} }
.settings-content[data-v-67f71ae9] { .settings-content[data-v-2e21278f] {
height: 350px; height: 350px;
} }
} }
/* Show a separator line above the Keybinding tab */ /* Show a separator line above the Keybinding tab */
/* This indicates the start of custom setting panels */ /* This indicates the start of custom setting panels */
.settings-sidebar[data-v-67f71ae9] .p-listbox-option[aria-label='Keybinding'] { .settings-sidebar[data-v-2e21278f] .p-listbox-option[aria-label='Keybinding'] {
position: relative; position: relative;
} }
.settings-sidebar[data-v-67f71ae9] .p-listbox-option[aria-label='Keybinding']::before { .settings-sidebar[data-v-2e21278f] .p-listbox-option[aria-label='Keybinding']::before {
position: absolute; position: absolute;
top: 0px; top: 0px;
left: 0px; left: 0px;
@ -3878,15 +4082,15 @@ audio.comfy-audio.empty-audio-widget {
margin-left: 0.5rem; margin-left: 0.5rem;
} }
.p-card[data-v-d65acb9a] { .p-card[data-v-ffc83afa] {
--p-card-body-padding: 10px 0 0 0; --p-card-body-padding: 10px 0 0 0;
overflow: hidden; overflow: hidden;
} }
[data-v-d65acb9a] .p-card-subtitle { [data-v-ffc83afa] .p-card-subtitle {
text-align: center; text-align: center;
} }
.carousel[data-v-fc26284b] { .carousel[data-v-d9962275] {
width: 66vw; width: 66vw;
} }
/** /**
@ -4123,18 +4327,18 @@ audio.comfy-audio.empty-audio-widget {
overflow-y: hidden; overflow-y: hidden;
} }
[data-v-6187144a] .p-terminal .xterm { [data-v-90a7f075] .p-terminal .xterm {
overflow-x: auto; overflow-x: auto;
} }
[data-v-6187144a] .p-terminal .xterm-screen { [data-v-90a7f075] .p-terminal .xterm-screen {
background-color: black; background-color: black;
overflow-y: hidden; overflow-y: hidden;
} }
[data-v-b27b58f4] .p-terminal .xterm { [data-v-03daf1c8] .p-terminal .xterm {
overflow-x: auto; overflow-x: auto;
} }
[data-v-b27b58f4] .p-terminal .xterm-screen { [data-v-03daf1c8] .p-terminal .xterm-screen {
background-color: black; background-color: black;
overflow-y: hidden; overflow-y: hidden;
} }
@ -4446,28 +4650,28 @@ audio.comfy-audio.empty-audio-widget {
box-sizing: border-box; box-sizing: border-box;
} }
.tree-node[data-v-a6457774] { .tree-node[data-v-654109c7] {
width: 100%; width: 100%;
display: flex; display: flex;
align-items: center; align-items: center;
justify-content: space-between; justify-content: space-between;
} }
.leaf-count-badge[data-v-a6457774] { .leaf-count-badge[data-v-654109c7] {
margin-left: 0.5rem; margin-left: 0.5rem;
} }
.node-content[data-v-a6457774] { .node-content[data-v-654109c7] {
display: flex; display: flex;
align-items: center; align-items: center;
flex-grow: 1; flex-grow: 1;
} }
.leaf-label[data-v-a6457774] { .leaf-label[data-v-654109c7] {
margin-left: 0.5rem; margin-left: 0.5rem;
} }
[data-v-a6457774] .editable-text span { [data-v-654109c7] .editable-text span {
word-break: break-all; word-break: break-all;
} }
[data-v-31d518da] .tree-explorer-node-label { [data-v-976a6d58] .tree-explorer-node-label {
width: 100%; width: 100%;
display: flex; display: flex;
align-items: center; align-items: center;
@ -4480,10 +4684,10 @@ audio.comfy-audio.empty-audio-widget {
* By setting the position to relative on the parent and using an absolutely positioned pseudo-element, * By setting the position to relative on the parent and using an absolutely positioned pseudo-element,
* we can create a visual indicator for the drop target without affecting the layout of other elements. * we can create a visual indicator for the drop target without affecting the layout of other elements.
*/ */
[data-v-31d518da] .p-tree-node-content:has(.tree-folder) { [data-v-976a6d58] .p-tree-node-content:has(.tree-folder) {
position: relative; position: relative;
} }
[data-v-31d518da] .p-tree-node-content:has(.tree-folder.can-drop)::after { [data-v-976a6d58] .p-tree-node-content:has(.tree-folder.can-drop)::after {
content: ''; content: '';
position: absolute; position: absolute;
top: 0; top: 0;
@ -4494,16 +4698,28 @@ audio.comfy-audio.empty-audio-widget {
pointer-events: none; pointer-events: none;
} }
[data-v-9159c070] .p-toolbar-end .p-button { [data-v-0061c432] .p-toolbar-end .p-button {
padding-top: 0.25rem; padding-top: 0.25rem;
padding-bottom: 0.25rem padding-bottom: 0.25rem
} }
@media (min-width: 1536px) { @media (min-width: 1536px) {
[data-v-9159c070] .p-toolbar-end .p-button { [data-v-0061c432] .p-toolbar-end .p-button {
padding-top: 0.5rem; padding-top: 0.5rem;
padding-bottom: 0.5rem padding-bottom: 0.5rem
} }
} }
[data-v-0061c432] .p-toolbar-start {
min-width: 0px;
flex: 1 1 0%;
overflow: hidden
}
.model_preview[data-v-32e6c4d9] { .model_preview[data-v-32e6c4d9] {
background-color: var(--comfy-menu-bg); background-color: var(--comfy-menu-bg);
@ -4579,31 +4795,6 @@ audio.comfy-audio.empty-audio-widget {
width: 16px; width: 16px;
} }
._content[data-v-c4279e6b] {
display: flex;
flex-direction: column
}
._content[data-v-c4279e6b] > :not([hidden]) ~ :not([hidden]) {
--tw-space-y-reverse: 0;
margin-top: calc(0.5rem * calc(1 - var(--tw-space-y-reverse)));
margin-bottom: calc(0.5rem * var(--tw-space-y-reverse))
}
._footer[data-v-c4279e6b] {
display: flex;
flex-direction: column;
align-items: flex-end;
padding-top: 1rem
}
.slot_row[data-v-d9792337] { .slot_row[data-v-d9792337] {
padding: 2px; padding: 2px;
} }
@ -4731,34 +4922,61 @@ audio.comfy-audio.empty-audio-widget {
color: var(--error-text); color: var(--error-text);
} }
._content[data-v-c4279e6b] {
display: flex;
flex-direction: column
}
._content[data-v-c4279e6b] > :not([hidden]) ~ :not([hidden]) {
--tw-space-y-reverse: 0;
margin-top: calc(0.5rem * calc(1 - var(--tw-space-y-reverse)));
margin-bottom: calc(0.5rem * var(--tw-space-y-reverse))
}
._footer[data-v-c4279e6b] {
display: flex;
flex-direction: column;
align-items: flex-end;
padding-top: 1rem
}
.node-lib-node-container[data-v-da9a8962] { .node-lib-node-container[data-v-da9a8962] {
height: 100%; height: 100%;
width: 100% width: 100%
} }
.p-selectbutton .p-button[data-v-4b8adc78] { .p-selectbutton .p-button[data-v-bd06e12b] {
padding: 0.5rem; padding: 0.5rem;
} }
.p-selectbutton .p-button .pi[data-v-4b8adc78] { .p-selectbutton .p-button .pi[data-v-bd06e12b] {
font-size: 1.5rem; font-size: 1.5rem;
} }
.field[data-v-4b8adc78] { .field[data-v-bd06e12b] {
display: flex; display: flex;
flex-direction: column; flex-direction: column;
gap: 0.5rem; gap: 0.5rem;
} }
.color-picker-container[data-v-4b8adc78] { .color-picker-container[data-v-bd06e12b] {
display: flex; display: flex;
align-items: center; align-items: center;
gap: 0.5rem; gap: 0.5rem;
} }
.scroll-container[data-v-ad33a347] { .scroll-container {
&[data-v-ad33a347] {
height: 100%; height: 100%;
overflow-y: auto; overflow-y: auto;
/* Firefox */ /* Firefox */
scrollbar-width: none; scrollbar-width: none;
}
&[data-v-ad33a347]::-webkit-scrollbar { &[data-v-ad33a347]::-webkit-scrollbar {
width: 1px; width: 1px;
} }
@ -4767,10 +4985,10 @@ audio.comfy-audio.empty-audio-widget {
} }
} }
.comfy-image-wrap[data-v-ffe66146] { .comfy-image-wrap[data-v-a748ccd8] {
display: contents; display: contents;
} }
.comfy-image-blur[data-v-ffe66146] { .comfy-image-blur[data-v-a748ccd8] {
position: absolute; position: absolute;
top: 0; top: 0;
left: 0; left: 0;
@ -4779,7 +4997,7 @@ audio.comfy-audio.empty-audio-widget {
-o-object-fit: cover; -o-object-fit: cover;
object-fit: cover; object-fit: cover;
} }
.comfy-image-main[data-v-ffe66146] { .comfy-image-main[data-v-a748ccd8] {
width: 100%; width: 100%;
height: 100%; height: 100%;
-o-object-fit: cover; -o-object-fit: cover;
@ -4788,19 +5006,19 @@ audio.comfy-audio.empty-audio-widget {
object-position: center; object-position: center;
z-index: 1; z-index: 1;
} }
.contain .comfy-image-wrap[data-v-ffe66146] { .contain .comfy-image-wrap[data-v-a748ccd8] {
position: relative; position: relative;
width: 100%; width: 100%;
height: 100%; height: 100%;
} }
.contain .comfy-image-main[data-v-ffe66146] { .contain .comfy-image-main[data-v-a748ccd8] {
-o-object-fit: contain; -o-object-fit: contain;
object-fit: contain; object-fit: contain;
-webkit-backdrop-filter: blur(10px); -webkit-backdrop-filter: blur(10px);
backdrop-filter: blur(10px); backdrop-filter: blur(10px);
position: absolute; position: absolute;
} }
.broken-image-placeholder[data-v-ffe66146] { .broken-image-placeholder[data-v-a748ccd8] {
display: flex; display: flex;
flex-direction: column; flex-direction: column;
align-items: center; align-items: center;
@ -4809,7 +5027,7 @@ audio.comfy-audio.empty-audio-widget {
height: 100%; height: 100%;
margin: 2rem; margin: 2rem;
} }
.broken-image-placeholder i[data-v-ffe66146] { .broken-image-placeholder i[data-v-a748ccd8] {
font-size: 3rem; font-size: 3rem;
margin-bottom: 0.5rem; margin-bottom: 0.5rem;
} }
@ -4827,7 +5045,7 @@ img.galleria-image {
z-index: 1; z-index: 1;
} }
.result-container[data-v-61515e14] { .result-container[data-v-2403edc6] {
width: 100%; width: 100%;
height: 100%; height: 100%;
aspect-ratio: 1 / 1; aspect-ratio: 1 / 1;
@ -4837,7 +5055,7 @@ img.galleria-image {
justify-content: center; justify-content: center;
align-items: center; align-items: center;
} }
.preview-mask[data-v-61515e14] { .preview-mask[data-v-2403edc6] {
position: absolute; position: absolute;
left: 50%; left: 50%;
top: 50%; top: 50%;
@ -4849,7 +5067,7 @@ img.galleria-image {
transition: opacity 0.3s ease; transition: opacity 0.3s ease;
z-index: 1; z-index: 1;
} }
.result-container:hover .preview-mask[data-v-61515e14] { .result-container:hover .preview-mask[data-v-2403edc6] {
opacity: 1; opacity: 1;
} }

File diff suppressed because one or more lines are too long

27
web/assets/index-DXE47DZl.js generated vendored Normal file

@ -0,0 +1,27 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { bZ as script$1, o as openBlock, f as createElementBlock, as as mergeProps, m as createBaseVNode } from "./index-DqqhYDnY.js";
var script = {
name: "BarsIcon",
"extends": script$1
};
function render(_ctx, _cache, $props, $setup, $data, $options) {
return openBlock(), createElementBlock("svg", mergeProps({
width: "14",
height: "14",
viewBox: "0 0 14 14",
fill: "none",
xmlns: "http://www.w3.org/2000/svg"
}, _ctx.pti()), _cache[0] || (_cache[0] = [createBaseVNode("path", {
"fill-rule": "evenodd",
"clip-rule": "evenodd",
d: "M13.3226 3.6129H0.677419C0.497757 3.6129 0.325452 3.54152 0.198411 3.41448C0.0713707 3.28744 0 3.11514 0 2.93548C0 2.75581 0.0713707 2.58351 0.198411 2.45647C0.325452 2.32943 0.497757 2.25806 0.677419 2.25806H13.3226C13.5022 2.25806 13.6745 2.32943 13.8016 2.45647C13.9286 2.58351 14 2.75581 14 2.93548C14 3.11514 13.9286 3.28744 13.8016 3.41448C13.6745 3.54152 13.5022 3.6129 13.3226 3.6129ZM13.3226 7.67741H0.677419C0.497757 7.67741 0.325452 7.60604 0.198411 7.479C0.0713707 7.35196 0 7.17965 0 6.99999C0 6.82033 0.0713707 6.64802 0.198411 6.52098C0.325452 6.39394 0.497757 6.32257 0.677419 6.32257H13.3226C13.5022 6.32257 13.6745 6.39394 13.8016 6.52098C13.9286 6.64802 14 6.82033 14 6.99999C14 7.17965 13.9286 7.35196 13.8016 7.479C13.6745 7.60604 13.5022 7.67741 13.3226 7.67741ZM0.677419 11.7419H13.3226C13.5022 11.7419 13.6745 11.6706 13.8016 11.5435C13.9286 11.4165 14 11.2442 14 11.0645C14 10.8848 13.9286 10.7125 13.8016 10.5855C13.6745 10.4585 13.5022 10.3871 13.3226 10.3871H0.677419C0.497757 10.3871 0.325452 10.4585 0.198411 10.5855C0.0713707 10.7125 0 10.8848 0 11.0645C0 11.2442 0.0713707 11.4165 0.198411 11.5435C0.325452 11.6706 0.497757 11.7419 0.677419 11.7419Z",
fill: "currentColor"
}, null, -1)]), 16);
}
__name(render, "render");
script.render = render;
export {
script as s
};
//# sourceMappingURL=index-DXE47DZl.js.map

File diff suppressed because one or more lines are too long

173
web/assets/index-jXPKy3pP.js generated vendored

@ -1,173 +0,0 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { B as BaseStyle, q as script$2, ak as UniqueComponentId, c9 as script$4, l as script$5, S as Ripple, aB as resolveComponent, o as openBlock, f as createElementBlock, D as mergeProps, H as createBaseVNode, J as renderSlot, T as normalizeClass, X as toDisplayString, I as createCommentVNode, k as createBlock, M as withCtx, G as resolveDynamicComponent, N as createVNode, aC as Transition, i as withDirectives, v as vShow } from "./index-DjNHn37O.js";
import { s as script$3 } from "./index-5HFeZax4.js";
var theme = /* @__PURE__ */ __name(function theme2(_ref) {
var dt = _ref.dt;
return "\n.p-panel {\n border: 1px solid ".concat(dt("panel.border.color"), ";\n border-radius: ").concat(dt("panel.border.radius"), ";\n background: ").concat(dt("panel.background"), ";\n color: ").concat(dt("panel.color"), ";\n}\n\n.p-panel-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n padding: ").concat(dt("panel.header.padding"), ";\n background: ").concat(dt("panel.header.background"), ";\n color: ").concat(dt("panel.header.color"), ";\n border-style: solid;\n border-width: ").concat(dt("panel.header.border.width"), ";\n border-color: ").concat(dt("panel.header.border.color"), ";\n border-radius: ").concat(dt("panel.header.border.radius"), ";\n}\n\n.p-panel-toggleable .p-panel-header {\n padding: ").concat(dt("panel.toggleable.header.padding"), ";\n}\n\n.p-panel-title {\n line-height: 1;\n font-weight: ").concat(dt("panel.title.font.weight"), ";\n}\n\n.p-panel-content {\n padding: ").concat(dt("panel.content.padding"), ";\n}\n\n.p-panel-footer {\n padding: ").concat(dt("panel.footer.padding"), ";\n}\n");
}, "theme");
var classes = {
root: /* @__PURE__ */ __name(function root(_ref2) {
var props = _ref2.props;
return ["p-panel p-component", {
"p-panel-toggleable": props.toggleable
}];
}, "root"),
header: "p-panel-header",
title: "p-panel-title",
headerActions: "p-panel-header-actions",
pcToggleButton: "p-panel-toggle-button",
contentContainer: "p-panel-content-container",
content: "p-panel-content",
footer: "p-panel-footer"
};
var PanelStyle = BaseStyle.extend({
name: "panel",
theme,
classes
});
var script$1 = {
name: "BasePanel",
"extends": script$2,
props: {
header: String,
toggleable: Boolean,
collapsed: Boolean,
toggleButtonProps: {
type: Object,
"default": /* @__PURE__ */ __name(function _default() {
return {
severity: "secondary",
text: true,
rounded: true
};
}, "_default")
}
},
style: PanelStyle,
provide: /* @__PURE__ */ __name(function provide() {
return {
$pcPanel: this,
$parentInstance: this
};
}, "provide")
};
var script = {
name: "Panel",
"extends": script$1,
inheritAttrs: false,
emits: ["update:collapsed", "toggle"],
data: /* @__PURE__ */ __name(function data() {
return {
id: this.$attrs.id,
d_collapsed: this.collapsed
};
}, "data"),
watch: {
"$attrs.id": /* @__PURE__ */ __name(function $attrsId(newValue) {
this.id = newValue || UniqueComponentId();
}, "$attrsId"),
collapsed: /* @__PURE__ */ __name(function collapsed(newValue) {
this.d_collapsed = newValue;
}, "collapsed")
},
mounted: /* @__PURE__ */ __name(function mounted() {
this.id = this.id || UniqueComponentId();
}, "mounted"),
methods: {
toggle: /* @__PURE__ */ __name(function toggle(event) {
this.d_collapsed = !this.d_collapsed;
this.$emit("update:collapsed", this.d_collapsed);
this.$emit("toggle", {
originalEvent: event,
value: this.d_collapsed
});
}, "toggle"),
onKeyDown: /* @__PURE__ */ __name(function onKeyDown(event) {
if (event.code === "Enter" || event.code === "NumpadEnter" || event.code === "Space") {
this.toggle(event);
event.preventDefault();
}
}, "onKeyDown")
},
computed: {
buttonAriaLabel: /* @__PURE__ */ __name(function buttonAriaLabel() {
return this.toggleButtonProps && this.toggleButtonProps.ariaLabel ? this.toggleButtonProps.ariaLabel : this.header;
}, "buttonAriaLabel")
},
components: {
PlusIcon: script$3,
MinusIcon: script$4,
Button: script$5
},
directives: {
ripple: Ripple
}
};
var _hoisted_1 = ["id"];
var _hoisted_2 = ["id", "aria-labelledby"];
function render(_ctx, _cache, $props, $setup, $data, $options) {
var _component_Button = resolveComponent("Button");
return openBlock(), createElementBlock("div", mergeProps({
"class": _ctx.cx("root")
}, _ctx.ptmi("root")), [createBaseVNode("div", mergeProps({
"class": _ctx.cx("header")
}, _ctx.ptm("header")), [renderSlot(_ctx.$slots, "header", {
id: $data.id + "_header",
"class": normalizeClass(_ctx.cx("title"))
}, function() {
return [_ctx.header ? (openBlock(), createElementBlock("span", mergeProps({
key: 0,
id: $data.id + "_header",
"class": _ctx.cx("title")
}, _ctx.ptm("title")), toDisplayString(_ctx.header), 17, _hoisted_1)) : createCommentVNode("", true)];
}), createBaseVNode("div", mergeProps({
"class": _ctx.cx("headerActions")
}, _ctx.ptm("headerActions")), [renderSlot(_ctx.$slots, "icons"), _ctx.toggleable ? (openBlock(), createBlock(_component_Button, mergeProps({
key: 0,
id: $data.id + "_header",
"class": _ctx.cx("pcToggleButton"),
"aria-label": $options.buttonAriaLabel,
"aria-controls": $data.id + "_content",
"aria-expanded": !$data.d_collapsed,
unstyled: _ctx.unstyled,
onClick: $options.toggle,
onKeydown: $options.onKeyDown
}, _ctx.toggleButtonProps, {
pt: _ctx.ptm("pcToggleButton")
}), {
icon: withCtx(function(slotProps) {
return [renderSlot(_ctx.$slots, _ctx.$slots.toggleicon ? "toggleicon" : "togglericon", {
collapsed: $data.d_collapsed
}, function() {
return [(openBlock(), createBlock(resolveDynamicComponent($data.d_collapsed ? "PlusIcon" : "MinusIcon"), mergeProps({
"class": slotProps["class"]
}, _ctx.ptm("pcToggleButton")["icon"]), null, 16, ["class"]))];
})];
}),
_: 3
}, 16, ["id", "class", "aria-label", "aria-controls", "aria-expanded", "unstyled", "onClick", "onKeydown", "pt"])) : createCommentVNode("", true)], 16)], 16), createVNode(Transition, mergeProps({
name: "p-toggleable-content"
}, _ctx.ptm("transition")), {
"default": withCtx(function() {
return [withDirectives(createBaseVNode("div", mergeProps({
id: $data.id + "_content",
"class": _ctx.cx("contentContainer"),
role: "region",
"aria-labelledby": $data.id + "_header"
}, _ctx.ptm("contentContainer")), [createBaseVNode("div", mergeProps({
"class": _ctx.cx("content")
}, _ctx.ptm("content")), [renderSlot(_ctx.$slots, "default")], 16), _ctx.$slots.footer ? (openBlock(), createElementBlock("div", mergeProps({
key: 0,
"class": _ctx.cx("footer")
}, _ctx.ptm("footer")), [renderSlot(_ctx.$slots, "footer")], 16)) : createCommentVNode("", true)], 16, _hoisted_2), [[vShow, !$data.d_collapsed]])];
}),
_: 3
}, 16)], 16);
}
__name(render, "render");
script.render = render;
export {
script as s
};
//# sourceMappingURL=index-jXPKy3pP.js.map


@ -1,6 +1,6 @@
var __defProp = Object.defineProperty; var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true }); var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { a$ as useKeybindingStore, a2 as useCommandStore, a as useSettingStore, cq as KeyComboImpl, cr as KeybindingImpl } from "./index-DjNHn37O.js"; import { an as useKeybindingStore, L as useCommandStore, a as useSettingStore, dp as KeyComboImpl, dq as KeybindingImpl } from "./index-DqqhYDnY.js";
const CORE_KEYBINDINGS = [ const CORE_KEYBINDINGS = [
{ {
combo: { combo: {
@ -96,7 +96,7 @@ const CORE_KEYBINDINGS = [
alt: true alt: true
}, },
commandId: "Comfy.Canvas.ZoomIn", commandId: "Comfy.Canvas.ZoomIn",
targetSelector: "#graph-canvas" targetElementId: "graph-canvas"
}, },
{ {
combo: { combo: {
@ -105,7 +105,7 @@ const CORE_KEYBINDINGS = [
shift: true shift: true
}, },
commandId: "Comfy.Canvas.ZoomIn", commandId: "Comfy.Canvas.ZoomIn",
targetSelector: "#graph-canvas" targetElementId: "graph-canvas"
}, },
// For number pad '+' // For number pad '+'
{ {
@ -114,7 +114,7 @@ const CORE_KEYBINDINGS = [
alt: true alt: true
}, },
commandId: "Comfy.Canvas.ZoomIn", commandId: "Comfy.Canvas.ZoomIn",
targetSelector: "#graph-canvas" targetElementId: "graph-canvas"
}, },
{ {
combo: { combo: {
@ -122,21 +122,21 @@ const CORE_KEYBINDINGS = [
alt: true alt: true
}, },
commandId: "Comfy.Canvas.ZoomOut", commandId: "Comfy.Canvas.ZoomOut",
targetSelector: "#graph-canvas" targetElementId: "graph-canvas"
}, },
{ {
combo: { combo: {
key: "." key: "."
}, },
commandId: "Comfy.Canvas.FitView", commandId: "Comfy.Canvas.FitView",
targetSelector: "#graph-canvas" targetElementId: "graph-canvas"
}, },
{ {
combo: { combo: {
key: "p" key: "p"
}, },
commandId: "Comfy.Canvas.ToggleSelected.Pin", commandId: "Comfy.Canvas.ToggleSelected.Pin",
targetSelector: "#graph-canvas" targetElementId: "graph-canvas"
}, },
{ {
combo: { combo: {
@ -144,7 +144,7 @@ const CORE_KEYBINDINGS = [
alt: true alt: true
}, },
commandId: "Comfy.Canvas.ToggleSelectedNodes.Collapse", commandId: "Comfy.Canvas.ToggleSelectedNodes.Collapse",
targetSelector: "#graph-canvas" targetElementId: "graph-canvas"
}, },
{ {
combo: { combo: {
@ -152,7 +152,7 @@ const CORE_KEYBINDINGS = [
ctrl: true ctrl: true
}, },
commandId: "Comfy.Canvas.ToggleSelectedNodes.Bypass", commandId: "Comfy.Canvas.ToggleSelectedNodes.Bypass",
targetSelector: "#graph-canvas" targetElementId: "graph-canvas"
}, },
{ {
combo: { combo: {
@ -160,7 +160,7 @@ const CORE_KEYBINDINGS = [
ctrl: true ctrl: true
}, },
commandId: "Comfy.Canvas.ToggleSelectedNodes.Mute", commandId: "Comfy.Canvas.ToggleSelectedNodes.Mute",
targetSelector: "#graph-canvas" targetElementId: "graph-canvas"
}, },
{ {
combo: { combo: {
@ -190,7 +190,7 @@ const useKeybindingService = /* @__PURE__ */ __name(() => {
return; return;
} }
const keybinding = keybindingStore.getKeybinding(keyCombo); const keybinding = keybindingStore.getKeybinding(keyCombo);
if (keybinding && keybinding.targetSelector !== "#graph-canvas") { if (keybinding && keybinding.targetElementId !== "graph-canvas") {
event.preventDefault(); event.preventDefault();
await commandStore.execute(keybinding.commandId); await commandStore.execute(keybinding.commandId);
return; return;
@ -247,4 +247,4 @@ const useKeybindingService = /* @__PURE__ */ __name(() => {
export { export {
useKeybindingService as u useKeybindingService as u
}; };
//# sourceMappingURL=keybindingService-Bx7YdkXn.js.map //# sourceMappingURL=keybindingService-DEgCutrm.js.map
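
Note on the keybindingService hunk above: the keybinding target field is renamed from targetSelector (a CSS selector such as "#graph-canvas") to targetElementId (a bare element id), and the dispatch check is updated to match. A minimal sketch of the resulting entry shape, using field values taken from the diff above; the variable name fitViewBinding is invented for illustration:

// Sketch only — shape of a core keybinding entry after the rename.
const fitViewBinding = {
  combo: { key: "." },
  commandId: "Comfy.Canvas.FitView",
  targetElementId: "graph-canvas" // previously: targetSelector: "#graph-canvas"
};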


@ -1,6 +1,6 @@
var __defProp = Object.defineProperty; var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true }); var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { $ as defineStore, ab as ref, c as computed } from "./index-DjNHn37O.js"; import { I as defineStore, U as ref, c as computed } from "./index-DqqhYDnY.js";
const useServerConfigStore = defineStore("serverConfig", () => { const useServerConfigStore = defineStore("serverConfig", () => {
const serverConfigById = ref({}); const serverConfigById = ref({});
const serverConfigs = computed(() => { const serverConfigs = computed(() => {
@ -87,4 +87,4 @@ const useServerConfigStore = defineStore("serverConfig", () => {
export { export {
useServerConfigStore as u useServerConfigStore as u
}; };
//# sourceMappingURL=serverConfigStore-CvyKFVuP.js.map //# sourceMappingURL=serverConfigStore-Kb5DJVFt.js.map

16
web/assets/uvMirrors-B-HKMf6X.js generated vendored Normal file

@ -0,0 +1,16 @@
const PYTHON_MIRROR = {
settingId: "Comfy-Desktop.UV.PythonInstallMirror",
mirror: "https://github.com/astral-sh/python-build-standalone/releases/download",
fallbackMirror: "https://bgithub.xyz/astral-sh/python-build-standalone/releases/download",
validationPathSuffix: "/20250115/cpython-3.10.16+20250115-aarch64-apple-darwin-debug-full.tar.zst.sha256"
};
const PYPI_MIRROR = {
settingId: "Comfy-Desktop.UV.PypiInstallMirror",
mirror: "https://pypi.org/simple/",
fallbackMirror: "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
};
export {
PYTHON_MIRROR as P,
PYPI_MIRROR as a
};
//# sourceMappingURL=uvMirrors-B-HKMf6X.js.map
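
The new uvMirrors module above only exports static mirror descriptors (setting id, default mirror, fallback mirror); this commit does not show how they are consumed. A hypothetical helper, purely for illustration — resolveMirror and userValue are invented names, not part of the bundle — that prefers a user-configured mirror and otherwise falls back to the module default:

// Hypothetical sketch, not taken from the bundle above.
const resolveMirror = (userValue, mirrorConfig) =>
  userValue && userValue.trim() !== "" ? userValue : mirrorConfig.mirror;

// Example: resolveMirror("", PYPI_MIRROR) would return "https://pypi.org/simple/".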

4
web/index.html vendored

@ -6,8 +6,8 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no"> <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
<link rel="stylesheet" type="text/css" href="user.css" /> <link rel="stylesheet" type="text/css" href="user.css" />
<link rel="stylesheet" type="text/css" href="materialdesignicons.min.css" /> <link rel="stylesheet" type="text/css" href="materialdesignicons.min.css" />
<script type="module" crossorigin src="./assets/index-DjNHn37O.js"></script> <script type="module" crossorigin src="./assets/index-DqqhYDnY.js"></script>
<link rel="stylesheet" crossorigin href="./assets/index-t-sFBuUC.css"> <link rel="stylesheet" crossorigin href="./assets/index-C1Hb_Yo9.css">
</head> </head>
<body class="litegraph grid"> <body class="litegraph grid">
<div id="vue-app"></div> <div id="vue-app"></div>


@ -266,7 +266,7 @@
], ],
"properties": {}, "properties": {},
"widgets_values": [ "widgets_values": [
"v1-5-pruned-emaonly.safetensors" "v1-5-pruned-emaonly-fp16.safetensors"
] ]
} }
], ],
@ -349,8 +349,8 @@
"extra": {}, "extra": {},
"version": 0.4, "version": 0.4,
"models": [{ "models": [{
"name": "v1-5-pruned-emaonly.safetensors", "name": "v1-5-pruned-emaonly-fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/stable-diffusion-v1-5-archive/resolve/main/v1-5-pruned-emaonly.safetensors?download=true", "url": "https://huggingface.co/Comfy-Org/stable-diffusion-v1-5-archive/resolve/main/v1-5-pruned-emaonly-fp16.safetensors?download=true",
"directory": "checkpoints" "directory": "checkpoints"
}] }]
} }