Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-01-11 02:15:17 +00:00)
Merge branch 'add_sample_sigmas' into hooks_part2
This commit is contained in: commit db2d7ad9ba
.github/workflows/update-frontend.yml (new file, vendored, 58 lines)
@@ -0,0 +1,58 @@
+name: Update Frontend Release
+
+on:
+  workflow_dispatch:
+    inputs:
+      version:
+        description: "Frontend version to update to (e.g., 1.0.0)"
+        required: true
+        type: string
+
+jobs:
+  update-frontend:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      pull-requests: write
+
+    steps:
+      - name: Checkout ComfyUI
+        uses: actions/checkout@v4
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
+      - name: Install requirements
+        run: |
+          python -m pip install --upgrade pip
+          pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
+          pip install -r requirements.txt
+          pip install wait-for-it
+      # Frontend asset will be downloaded to ComfyUI/web_custom_versions/Comfy-Org_ComfyUI_frontend/{version}
+      - name: Start ComfyUI server
+        run: |
+          python main.py --cpu --front-end-version Comfy-Org/ComfyUI_frontend@${{ github.event.inputs.version }} 2>&1 | tee console_output.log &
+          wait-for-it --service 127.0.0.1:8188 -t 30
+      - name: Configure Git
+        run: |
+          git config --global user.name "GitHub Action"
+          git config --global user.email "action@github.com"
+      # Replace existing frontend content with the new version and remove .js.map files
+      # See https://github.com/Comfy-Org/ComfyUI_frontend/issues/2145 for why we remove .js.map files
+      - name: Update frontend content
+        run: |
+          rm -rf web/
+          cp -r web_custom_versions/Comfy-Org_ComfyUI_frontend/${{ github.event.inputs.version }} web/
+          rm web/**/*.js.map
+      - name: Create Pull Request
+        uses: peter-evans/create-pull-request@v7
+        with:
+          token: ${{ secrets.PR_BOT_PAT }}
+          commit-message: "Update frontend to v${{ github.event.inputs.version }}"
+          title: "Frontend Update: v${{ github.event.inputs.version }}"
+          body: |
+            Automated PR to update frontend content to version ${{ github.event.inputs.version }}
+
+            This PR was created automatically by the frontend update workflow.
+          branch: release-${{ github.event.inputs.version }}
+          base: master
+          labels: Frontend,dependencies
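The workflow is workflow_dispatch-only, so it runs only when started by hand or over the API. A minimal dispatch sketch in Python, assuming a token with workflow permissions in the GITHUB_TOKEN environment variable (the version value is just the example from the input description):

    import os
    import requests

    # POST /repos/{owner}/{repo}/actions/workflows/{workflow_id}/dispatches
    # returns 204 No Content on success.
    resp = requests.post(
        "https://api.github.com/repos/comfyanonymous/ComfyUI/actions/workflows/update-frontend.yml/dispatches",
        headers={
            "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
            "Accept": "application/vnd.github+json",
        },
        json={"ref": "master", "inputs": {"version": "1.0.0"}},
    )
    resp.raise_for_status()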
CODEOWNERS
@@ -17,7 +17,7 @@
 /app/ @yoland68 @robinjhuang @huchenlei @webfiltered @pythongosssss @ltdrdata
 
 # Frontend assets
-/web/ @huchenlei @webfiltered @pythongosssss
+/web/ @huchenlei @webfiltered @pythongosssss @yoland68 @robinjhuang
 
 # Extra nodes
 /comfy_extras/ @yoland68 @robinjhuang @huchenlei @pythongosssss @ltdrdata @Kosinkadink
comfy/hooks.py
@@ -492,7 +492,7 @@ class HookKeyframeGroup:
             return False
         if curr_t == self._curr_t:
             return False
-        max_sigma = torch.max(transformer_options["sigmas"])
+        max_sigma = torch.max(transformer_options["sample_sigmas"])
         prev_index = self._current_index
         prev_strength = self._current_strength
         # if met guaranteed steps, look for next keyframe in case need to switch
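Only the key changes here: transformer_options also carries a per-step "sigmas" entry holding the sigma for the current model call, so the full schedule needs its own name to avoid colliding with it. A hedged sketch of how hook code might use the new key, mirroring the max_sigma line above (the helper is hypothetical):

    import torch

    def keyframe_boundary_sigma(transformer_options, start_percent):
        # Map a keyframe's start percentage (0.0-1.0) onto the sampling
        # schedule. torch.max over the full schedule yields the starting
        # sigma, exactly as HookKeyframeGroup computes max_sigma above.
        schedule = transformer_options["sample_sigmas"]
        max_sigma = torch.max(schedule)
        min_sigma = torch.min(schedule)
        # Assumes a monotonically decreasing schedule ending near zero.
        return max_sigma - start_percent * (max_sigma - min_sigma)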
comfy/model_management.py
@@ -1128,10 +1128,6 @@ def unload_all_models():
     free_memory(1e30, get_torch_device())
 
 
-def resolve_lowvram_weight(weight, model, key): #TODO: remove
-    logging.warning("The comfy.model_management.resolve_lowvram_weight function will be removed soon, please stop using it.")
-    return weight
-
 #TODO: might be cleaner to put this somewhere else
 import threading
 
comfy/samplers.py
@@ -849,7 +849,7 @@ class CFGGuider:
         self.conds = process_conds(self.inner_model, noise, self.conds, device, latent_image, denoise_mask, seed)
 
         extra_model_options = comfy.model_patcher.create_model_options_clone(self.model_options)
-        extra_model_options.setdefault("transformer_options", {})["sigmas"] = sigmas
+        extra_model_options.setdefault("transformer_options", {})["sample_sigmas"] = sigmas
         extra_args = {"model_options": extra_model_options, "seed": seed}
 
         executor = comfy.patcher_extension.WrapperExecutor.new_class_executor(
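This is the producer side: CFGGuider clones its model options and publishes the full schedule under "sample_sigmas" before sampling starts, so any wrapper, patch, or hook that receives transformer_options can relate the current step to the whole run. A sketch under the assumption that the per-step "sigmas" value matches a schedule entry exactly (the function name is hypothetical):

    import torch

    def current_step_index(transformer_options):
        step_sigma = transformer_options["sigmas"][0]    # sigma for this model call
        schedule = transformer_options["sample_sigmas"]  # full schedule from CFGGuider
        index = int(torch.argmin(torch.abs(schedule - step_sigma)))
        return index, len(schedule) - 1                  # (current step, total steps)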
nodes.py (22 lines changed)
@@ -913,6 +913,9 @@ class CLIPLoader:
     def INPUT_TYPES(s):
         return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ),
                               "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart"], ),
+                              },
+                "optional": {
+                              "device": (["default", "cpu"], {"advanced": True}),
                              }}
     RETURN_TYPES = ("CLIP",)
     FUNCTION = "load_clip"
@@ -921,7 +924,7 @@ class CLIPLoader:
 
     DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 / clip-g / clip-l\nstable_audio: t5\nmochi: t5"
 
-    def load_clip(self, clip_name, type="stable_diffusion"):
+    def load_clip(self, clip_name, type="stable_diffusion", device="default"):
         if type == "stable_cascade":
             clip_type = comfy.sd.CLIPType.STABLE_CASCADE
         elif type == "sd3":
@@ -937,8 +940,12 @@ class CLIPLoader:
         else:
             clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION
 
+        model_options = {}
+        if device == "cpu":
+            model_options["load_device"] = model_options["offload_device"] = torch.device("cpu")
+
         clip_path = folder_paths.get_full_path_or_raise("text_encoders", clip_name)
-        clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
+        clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type, model_options=model_options)
         return (clip,)
 
 class DualCLIPLoader:
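A hedged usage sketch of the new option, invoking the node class directly as ComfyUI's Python embedding allows; the encoder filename is a placeholder and must exist under models/text_encoders:

    from nodes import CLIPLoader

    # Keep the text encoder on the CPU to spare VRAM for the diffusion model.
    clip, = CLIPLoader().load_clip("t5xxl_fp16.safetensors", type="sd3", device="cpu")
    tokens = clip.tokenize("a photo of a cat")
    cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)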
@@ -947,6 +954,9 @@ class DualCLIPLoader:
         return {"required": { "clip_name1": (folder_paths.get_filename_list("text_encoders"), ),
                               "clip_name2": (folder_paths.get_filename_list("text_encoders"), ),
                               "type": (["sdxl", "sd3", "flux", "hunyuan_video"], ),
+                              },
+                "optional": {
+                              "device": (["default", "cpu"], {"advanced": True}),
                              }}
     RETURN_TYPES = ("CLIP",)
     FUNCTION = "load_clip"
@@ -955,7 +965,7 @@ class DualCLIPLoader:
 
     DESCRIPTION = "[Recipes]\n\nsdxl: clip-l, clip-g\nsd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\nflux: clip-l, t5"
 
-    def load_clip(self, clip_name1, clip_name2, type):
+    def load_clip(self, clip_name1, clip_name2, type, device="default"):
         clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
         clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
         if type == "sdxl":
@@ -967,7 +977,11 @@ class DualCLIPLoader:
         elif type == "hunyuan_video":
             clip_type = comfy.sd.CLIPType.HUNYUAN_VIDEO
 
-        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
+        model_options = {}
+        if device == "cpu":
+            model_options["load_device"] = model_options["offload_device"] = torch.device("cpu")
+
+        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type, model_options=model_options)
         return (clip,)
 
 class CLIPVisionLoader:
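The same option is exposed to API-format workflows. A sketch of the node entry as it would appear in the JSON posted to /prompt; the filenames are placeholders, and ComfyUI expects the loader to feed the rest of a graph that ends in an output node:

    dual_clip_loader = {
        "class_type": "DualCLIPLoader",
        "inputs": {
            "clip_name1": "clip_l.safetensors",       # placeholder
            "clip_name2": "t5xxl_fp16.safetensors",   # placeholder
            "type": "flux",
            "device": "cpu",  # the new optional input; "default" keeps prior behavior
        },
    }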